text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
##########################################################################
#
# Copyright 2012 Jose Fonseca
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################
'''Sample program for apitrace pickle command.
Run as:
apitrace pickle foo.trace | python unpickle.py
'''
import itertools
import optparse
import sys
import time
import re
import cPickle as pickle
class Visitor:
    """Type-dispatching visitor over an unpickled call object tree.

    visit() routes each object to a handler by its exact concrete type;
    anything not in the dispatch table falls back to visitObj().
    """

    def __init__(self):
        # Exact-type dispatch table (no isinstance walk: type() lookup only).
        self.dispatch = {
            type(None): self.visitNone,
            bool: self.visitBool,
            int: self.visitInt,
            long: self.visitInt,
            float: self.visitFloat,
            str: self.visitStr,
            tuple: self.visitTuple,
            list: self.visitList,
            dict: self.visitDict,
            bytearray: self.visitByteArray,
        }

    def visit(self, obj):
        handler = self.dispatch.get(type(obj), self.visitObj)
        return handler(obj)

    def visitObj(self, obj):
        # Default handler; subclasses decide what to do with unknown types.
        raise NotImplementedError

    def visitAtom(self, obj):
        return self.visitObj(obj)

    def visitNone(self, obj):
        return self.visitAtom(obj)

    def visitBool(self, obj):
        return self.visitAtom(obj)

    def visitInt(self, obj):
        return self.visitAtom(obj)

    def visitFloat(self, obj):
        return self.visitAtom(obj)

    def visitStr(self, obj):
        return self.visitAtom(obj)

    def visitIterable(self, obj):
        return self.visitObj(obj)

    def visitTuple(self, obj):
        return self.visitIterable(obj)

    def visitList(self, obj):
        return self.visitIterable(obj)

    def visitDict(self, obj):
        raise NotImplementedError

    def visitByteArray(self, obj):
        raise NotImplementedError
class Dumper(Visitor):
    """Visitor rendering an unpickled call tree as human-readable text."""

    # Strings matching this look like identifiers (e.g. GL enum names)
    # and are printed unquoted.
    id_re = re.compile('^[_A-Za-z][_A-Za-z0-9]*$')

    def visitObj(self, obj):
        return repr(obj)

    def visitStr(self, obj):
        if self.id_re.match(obj):
            return obj
        else:
            return repr(obj)

    def visitTuple(self, obj):
        # Fix: tuples print with parentheses, lists with brackets, matching
        # Python notation (the original had the two delimiters swapped).
        return '(' + ', '.join(itertools.imap(self.visit, obj)) + ')'

    def visitList(self, obj):
        return '[' + ', '.join(itertools.imap(self.visit, obj)) + ']'

    def visitByteArray(self, obj):
        # Blobs are summarized by their length only.
        return 'blob(%u)' % len(obj)
class Hasher(Visitor):
    '''Returns a hashable version of the objtree.'''

    def visitObj(self, obj):
        return obj

    def visitAtom(self, obj):
        return obj

    def visitIterable(self, obj):
        # Convert lists (unhashable) and tuples alike into tuples of
        # recursively-hashable children.
        return tuple(self.visit(item) for item in obj)

    def visitByteArray(self, obj):
        # bytearrays are mutable and unhashable; use their string contents.
        return str(obj)
class Rebuilder(Visitor):
    '''Rebuilds the object tree bottom-up, reusing unchanged subtrees.

    NOTE(review): the original docstring ("Returns a hashable version of
    the objtree.") appears copied from Hasher; this class actually returns
    the same tree, reconstructing a container only when a child changed.
    '''
    def visitAtom(self, obj):
        return obj
    def visitIterable(self, obj):
        changed = False
        newItems = []
        for oldItem in obj:
            newItem = self.visit(oldItem)
            if newItem is not oldItem:
                # Identity (not equality) check: any replaced child forces
                # a rebuild of this container.
                changed = True
            newItems.append(newItem)
        if changed:
            # Preserve the concrete container type (tuple or list).
            klass = type(obj)
            return klass(newItems)
        else:
            # Nothing changed -- return the original object itself.
            return obj
    def visitByteArray(self, obj):
        return obj
class Call:
    """One traced API call: (serial number, function name, args, return).

    Equality and hashing deliberately ignore the call number so identical
    calls at different trace positions compare equal.
    """

    def __init__(self, callTuple):
        self.no, self.functionName, self.args, self.ret = callTuple
        self._hash = None  # lazily-computed hash cache

    def __str__(self):
        dumper = Dumper()
        argText = ', '.join([dumper.visit(arg) for arg in self.args])
        text = self.functionName + '(' + argText + ')'
        if self.no is not None:
            text = str(self.no) + ' ' + text
        if self.ret is not None:
            text = text + ' = ' + dumper.visit(self.ret)
        return text

    def __eq__(self, other):
        if self.functionName != other.functionName:
            return False
        return self.args == other.args and self.ret == other.ret

    def __hash__(self):
        if self._hash is None:
            hasher = Hasher()
            hashable = (hasher.visit(self.functionName),
                        hasher.visit(self.args),
                        hasher.visit(self.ret))
            self._hash = hash(hashable)
        return self._hash
class Unpickler:
    """Reads pickled call tuples from a stream and dispatches each one
    to handleCall(); subclasses override handleCall() to process calls."""

    # Factory used to wrap each raw tuple; subclasses may substitute
    # their own Call subclass.
    callFactory = Call

    def __init__(self, stream):
        self.stream = stream

    def parse(self):
        # Consume calls until the stream runs dry.
        while self.parseCall():
            pass

    def parseCall(self):
        try:
            callTuple = pickle.load(self.stream)
        except EOFError:
            # End of the pickled stream -- stop the parse loop.
            return False
        call = self.callFactory(callTuple)
        self.handleCall(call)
        return True

    def handleCall(self, call):
        # Default: ignore the call; subclasses do the real work.
        pass
class Counter(Unpickler):
    """Unpickler that counts calls and per-function frequencies, printing
    a frequency table after the whole stream has been parsed."""

    def __init__(self, stream, verbose = False):
        Unpickler.__init__(self, stream)
        self.verbose = verbose
        self.numCalls = 0
        # Maps function name -> number of occurrences.
        self.functionFrequencies = {}

    def parse(self):
        Unpickler.parse(self)
        # Sort ascending by frequency before printing.
        # NOTE(review): tuple-unpacking lambda parameters and cmp() are
        # Python-2-only syntax.
        functionFrequencies = self.functionFrequencies.items()
        functionFrequencies.sort(lambda (name1, freq1), (name2, freq2): cmp(freq1, freq2))
        for name, frequency in functionFrequencies:
            sys.stdout.write('%8u %s\n' % (frequency, name))

    def handleCall(self, call):
        if self.verbose:
            sys.stdout.write(str(call))
            sys.stdout.write('\n')
        self.numCalls += 1
        # EAFP: first occurrence of a function raises KeyError.
        try:
            self.functionFrequencies[call.functionName] += 1
        except KeyError:
            self.functionFrequencies[call.functionName] = 1
def main():
    """Parse command-line options and count calls read from stdin.

    Expects the pickled call stream on stdin, as produced by
    `apitrace pickle <trace>`.
    """
    optparser = optparse.OptionParser(
        usage="\n\tapitrace pickle <trace> | %prog [options]")
    optparser.add_option(
        '-p', '--profile',
        action="store_true", dest="profile", default=False,
        help="profile call parsing")
    optparser.add_option(
        '-v', '--verbose',
        action="store_true", dest="verbose", default=False,
        help="dump calls to stdout")
    (options, args) = optparser.parse_args(sys.argv[1:])
    if args:
        optparser.error('unexpected arguments')
    # Change stdin to binary mode (required on Windows, where msvcrt is
    # available; a no-op elsewhere because the import fails).
    try:
        import msvcrt
    except ImportError:
        pass
    else:
        import os
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
    startTime = time.time()
    parser = Counter(sys.stdin, options.verbose)
    parser.parse()
    stopTime = time.time()
    duration = stopTime - startTime
    if options.profile:
        sys.stderr.write('Processed %u calls in %.03f secs, at %u calls/sec\n' % (parser.numCalls, duration, parser.numCalls/duration))
if __name__ == '__main__':
    main()
| PeterLValve/apitrace | scripts/unpickle.py | Python | mit | 7,712 | [
"VisIt"
] | 3a27dae1fc6678a6e3d2f6c7023213e017717c3456e32202adf264a422acfa16 |
from sfepy.base.base import *
from sfepy.postprocess.utils import mlab
from sfepy.fem import Mesh
from sfepy.fem.meshio import MeshIO, vtk_cell_types, supported_formats
from sfepy.solvers.ts import TimeStepper
from dataset_manager import DatasetManager
from enthought.tvtk.api import tvtk
from enthought.mayavi.sources.vtk_data_source import VTKDataSource
from enthought.pyface.timer.api import Timer
def create_file_source(filename, watch=False, offscreen=True):
    """Factory function to create a file source corresponding to the
    given file format."""
    kwargs = {'watch' : watch, 'offscreen' : offscreen}
    # A string means a single file; anything else is taken as a sequence
    # of file names, identified by the extension of its first member.
    is_sequence = not isinstance(filename, str)
    if is_sequence:
        fmt = os.path.splitext(filename[0])[1]
    else:
        fmt = os.path.splitext(filename)[1]
    fmt = fmt.lower()
    if fmt == '.vtk':
        # VTK is supported directly by Mayavi, no need to use MeshIO.
        if is_sequence:
            return VTKSequenceFileSource(filename, **kwargs)
        return VTKFileSource(filename, **kwargs)
    if fmt in supported_formats.keys():
        if not is_sequence:
            return GenericFileSource(filename, **kwargs)
        if fmt == '.h5':
            raise ValueError('format .h5 does not support file sequences!')
        return GenericSequenceFileSource(filename, **kwargs)
    raise ValueError('unknown file format! (%s)' % fmt)
class FileSource(Struct):
    """General file source."""

    def __init__(self, filename, watch=False, offscreen=True):
        """Create a file source using the given file name."""
        mlab.options.offscreen = offscreen
        self.watch = watch
        self.filename = filename
        self.reset()

    def __call__(self, step=0):
        """Get the file source."""
        # Lazily create the underlying source; when watching, start a
        # 1000 ms polling timer that checks the file for modification.
        if self.source is None:
            self.source = self.create_source()
            if self.watch:
                self.timer = Timer(1000, self.poll_file)
        return self.source

    def reset(self):
        """Reset."""
        self.source = None
        self.step_range = None
        self.notify_obj = None
        if self.watch:
            # Remember the current stat so poll_file() can detect changes.
            self.last_stat = os.stat(self.filename)
        self.set_step()

    def set_step(self, step=0):
        """Set step of a data sequence."""
        self.step = step

    def get_step_range(self):
        return self.step_range

    def file_changed(self):
        # Hook called from poll_file() when the watched file changed;
        # subclasses override to refresh cached state.
        pass

    def setup_notification(self, obj, attr):
        """The attribute 'attr' of the object 'obj' will be set to True
        when the source file is watched and changes."""
        self.notify_obj = obj
        self.notify_attr = attr

    def poll_file(self):
        """Check the source file's time stamp and notify the
        self.notify_obj in case it changed. Subclasses should implement
        the file_changed() method."""
        if not self.notify_obj:
            return
        s = os.stat(self.filename)
        # s[-2] is the modification time (st_mtime) in the stat tuple.
        if s[-2] == self.last_stat[-2]:
            setattr(self.notify_obj, self.notify_attr, False)
        else:
            self.file_changed()
            setattr(self.notify_obj, self.notify_attr, True)
            self.last_stat = s
class VTKFileSource(FileSource):
    """A thin wrapper around mlab.pipeline.open()."""

    def create_source(self):
        """Create a VTK file source """
        return mlab.pipeline.open(self.filename)

    def get_bounding_box(self):
        # The VTK reader reports bounds as (x0, x1, y0, y1, z0, z1);
        # reshape into a (2, 3) array of [mins; maxs].
        bounds = self.source.reader.unstructured_grid_output.bounds
        return nm.array(bounds).reshape(3, 2).T

    def set_filename(self, filename, vis_source):
        """Point both this source and the Mayavi source at a new file."""
        self.filename = filename
        vis_source.base_file_name = filename

    def get_step_range(self):
        """A single VTK file holds exactly one time step."""
        return (0, 0)
class VTKSequenceFileSource(VTKFileSource):
    """A thin wrapper around mlab.pipeline.open() for VTK file sequences."""

    def create_source(self):
        """Create a VTK file source """
        # Open the first file of the sequence.
        return mlab.pipeline.open(self.filename[0])

    def set_filename(self, filename, vis_source):
        """Store the sequence and select the file of the current step."""
        self.filename = filename
        vis_source.base_file_name = filename[self.step]

    def get_step_range(self):
        """One time step per file in the sequence."""
        return (0, len(self.filename) - 1)
class GenericFileSource(FileSource):
    """File source usable with any format supported by MeshIO classes."""

    def __init__(self, *args, **kwargs):
        FileSource.__init__(self, *args, **kwargs)
        # MeshIO instance, created lazily in read_common().
        self.io = None

    def read_common(self, filename):
        """Read the mesh and the available step range from the file."""
        self.io = MeshIO.any_from_filename(filename)
        self.step_range = (0, self.io.read_last_step())
        self.mesh = mesh = Mesh.from_file(filename)
        self.n_nod, self.dim = self.mesh.coors.shape

    def create_source(self):
        """Create a VTK source from data in a SfePy-supported file."""
        if self.io is None:
            self.read_common(self.filename)
        dataset = self.create_dataset()
        # A step without data raises ValueError; fall back to mesh only.
        try:
            out = self.io.read_data(self.step)
        except ValueError:
            out = None
        if out is not None:
            self.add_data_to_dataset(dataset, out)
        src = VTKDataSource(data=dataset)
        return src

    def get_bounding_box(self):
        bbox = self.mesh.get_bounding_box()
        if self.dim == 2:
            # Pad a zero z-range for 2D meshes.
            bbox = nm.c_[bbox, [0.0, 0.0]]
        return bbox

    def set_filename(self, filename, vis_source):
        self.filename = filename
        self.source = self.create_source()
        vis_source.data = self.source.data

    def get_step_range(self):
        if self.step_range is None:
            io = MeshIO.any_from_filename(self.filename)
            self.step_range = (0, io.read_last_step())
        return self.step_range

    def file_changed(self):
        # The file grew (new steps appended); refresh the step range.
        self.step_range = (0, self.io.read_last_step())

    def create_dataset(self):
        """Create a tvtk.UnstructuredGrid dataset from the Mesh instance of the
        file source."""
        mesh = self.mesh
        n_nod, dim = self.n_nod, self.dim
        n_el, n_els, n_e_ps = mesh.n_el, mesh.n_els, mesh.n_e_ps
        if dim == 2:
            # VTK points are always 3D; append a zero z coordinate.
            nod_zz = nm.zeros((n_nod, 1), dtype=mesh.coors.dtype)
            points = nm.c_[mesh.coors, nod_zz]
        else:
            points = mesh.coors
        dataset = tvtk.UnstructuredGrid(points=points)
        cell_types = []
        cells = []
        offset = [0]
        for ig, conn in enumerate(mesh.conns):
            # One VTK cell type per element group; each cell record is
            # [n_vertices, v0, v1, ...] in the flat `cells` array.
            cell_types += [vtk_cell_types[mesh.descs[ig]]] * n_els[ig]
            nn = nm.array([conn.shape[1]] * n_els[ig])
            aux = nm.c_[nn[:,None], conn]
            cells.extend(aux.ravel())
            offset.extend([aux.shape[1]] * n_els[ig])
        cells = nm.array(cells)
        cell_types = nm.array(cell_types)
        # Offsets index the start of each cell record in `cells`.
        offset = nm.cumsum(offset)[:-1]
        cell_array = tvtk.CellArray()
        cell_array.set_cells(n_el, cells)
        dataset.set_cells(cell_types, offset, cell_array)
        return dataset

    def add_data_to_dataset(self, dataset, data):
        """Add point and cell data to the dataset."""
        dim = self.dim
        # Number of components of a symmetric tensor for this dimension
        # (3 in 2D, 6 in 3D); Python 2 integer division.
        sym = (dim + 1) * dim / 2
        dm = DatasetManager(dataset=dataset)
        for key, val in data.iteritems():
            vd = val.data
            if val.mode == 'vertex':
                # Vertex data: scalar, or 2D/3D vector (2D gets a zero z).
                if vd.shape[1] == 1:
                    aux = vd.reshape((vd.shape[0],))
                elif vd.shape[1] == 2:
                    zz = nm.zeros((vd.shape[0], 1), dtype=vd.dtype)
                    aux = nm.c_[vd, zz]
                elif vd.shape[1] == 3:
                    aux = vd
                else:
                    raise ValueError('unknown vertex data format! (%s)'\
                                     % vd.shape)
                dm.add_array(aux, key, 'point')
            elif val.mode == 'cell':
                # Cell data has shape (n_el, 1, n_row, n_col); scalars,
                # vectors and (symmetric or full) tensors are expanded to
                # the 3x3 component layout VTK expects.
                ne, aux, nr, nc = vd.shape
                if (nr == 1) and (nc == 1):
                    aux = vd.reshape((ne,))
                elif (nr == dim) and (nc == 1):
                    if dim == 3:
                        aux = vd.reshape((ne, dim))
                    else:
                        zz = nm.zeros((vd.shape[0], 1), dtype=vd.dtype);
                        aux = nm.c_[vd.squeeze(), zz]
                elif (((nr == sym) or (nr == (dim * dim))) and (nc == 1)) \
                         or ((nr == dim) and (nc == dim)):
                    vd = vd.squeeze()
                    if dim == 3:
                        # Index maps expand the compressed storage into a
                        # row-major 3x3 tensor (presumably Voigt-like
                        # component order -- TODO confirm against MeshIO).
                        if nr == sym:
                            aux = vd[:,[0,3,4,3,1,5,4,5,2]]
                        elif nr == (dim * dim):
                            aux = vd[:,[0,3,4,6,1,5,7,8,2]]
                        else:
                            aux = vd.reshape((vd.shape[0], dim*dim))
                    else:
                        # 2D tensors are embedded into 3x3 with zero
                        # out-of-plane components.
                        zz = nm.zeros((vd.shape[0], 1), dtype=vd.dtype);
                        if nr == sym:
                            aux = nm.c_[vd[:,[0,2]], zz, vd[:,[2,1]],
                                        zz, zz, zz, zz]
                        elif nr == (dim * dim):
                            aux = nm.c_[vd[:,[0,2]], zz, vd[:,[3,1]],
                                        zz, zz, zz, zz]
                        else:
                            aux = nm.c_[vd[:,0,[0,1]], zz, vd[:,1,[0,1]],
                                        zz, zz, zz, zz]
                dm.add_array(aux, key, 'cell')
class GenericSequenceFileSource(GenericFileSource):
    """File source usable with any format supported by MeshIO classes, with
    exception of HDF5 (.h5), for file sequences."""

    def create_source(self):
        """Create a VTK source from data in a SfePy-supported file."""
        if self.io is None:
            # Each step of the sequence lives in its own file.
            self.read_common(self.filename[self.step])
        return VTKDataSource(data=self.create_dataset())

    def set_filename(self, filename, vis_source):
        """Install a new file sequence and rebuild the source from scratch."""
        self.filename = filename
        self.io = None
        self.source = self.create_source()
        vis_source.data = self.source.data

    def get_step_range(self):
        """One time step per file in the sequence."""
        return (0, len(self.filename) - 1)
| olivierverdier/sfepy | sfepy/postprocess/sources.py | Python | bsd-3-clause | 10,261 | [
"Mayavi",
"VTK"
] | 460f1d7bb8440f0380df4e4c4495d5d2a232102c1e444fe77b92a9a70c25cadd |
# -*- coding: utf-8 -*-
#/**********************************************************************
#** This program is part of 'MOOSE', the
#** Messaging Object Oriented Simulation Environment.
#** Copyright (C) 2003-2014 Upinder S. Bhalla. and NCBS
#** It is made available under the terms of the
#** GNU Lesser General Public License version 2.1
#** See the file COPYING.LIB for the full notice.
#**********************************************************************/
from __future__ import print_function, division
import os
import random
import time
import pylab
from numpy import random as nprand
import sys
import moose
def make_network():
    """
    This snippet sets up a recurrent network of IntFire objects, using
    SimpleSynHandlers to deal with spiking events.
    It isn't very satisfactory as activity runs down after a while.
    It is a good example for using the IntFire, setting up random
    connectivity, and using SynHandlers.
    """
    # Network and simulation parameters.
    size = 1024
    dt = 0.2
    runsteps = 50
    delayMin = 0
    delayMax = 4
    weightMax = 1
    Vmax = 1.0
    thresh = 0.4
    refractoryPeriod = 0.4
    tau = 0.5
    connectionProbability = 0.01
    # Fixed seeds for reproducible runs.
    random.seed( 123 )
    nprand.seed( 456 )
    t0 = time.time()
    network = moose.IntFire( 'network', size );
    syns = moose.SimpleSynHandler( '/network/syns', size );
    moose.connect( syns, 'activationOut', network, 'activation', 'OneToOne' )
    moose.le( '/network' )
    syns.vec.numSynapses = [1] * size
    sv = moose.vec( '/network/syns/synapse' )
    # NOTE: these print calls pass a single tuple (a 2to3 artifact), so
    # the output shows parentheses and quotes.
    print(('before connect t = ', time.time() - t0))
    # Sparse random recurrent connectivity: every neuron's spike output
    # may feed any synapse, with the given probability.
    mid = moose.connect( network, 'spikeOut', sv, 'addSpike', 'Sparse')
    print(('after connect t = ', time.time() - t0))
    m2 = moose.element( mid )
    m2.setRandomConnectivity( connectionProbability, 5489 )
    print(('after setting connectivity, t = ', time.time() - t0))
    # Random initial membrane potentials.
    network.vec.Vm = nprand.rand( size ) * Vmax
    network.vec.thresh = thresh
    network.vec.refractoryPeriod = refractoryPeriod
    network.vec.tau = tau
    numSynVec = syns.vec.numSynapses
    print(('Middle of setup, t = ', time.time() - t0))
    numTotSyn = sum( numSynVec )
    print((numSynVec.size, ', tot = ', numTotSyn, ', numSynVec = ', numSynVec))
    # Randomize per-synapse delays and weights.
    for item in syns.vec:
        sh = moose.element( item )
        sh.synapse.delay = delayMin + (delayMax - delayMin ) * nprand.rand( len( sh.synapse ) )
        sh.synapse.weight = nprand.rand( len( sh.synapse ) ) * weightMax
    print(('after setup, t = ', time.time() - t0))
    # Spike statistics: each of numStats counters aggregates the spikes
    # of `convergence` consecutive neurons.
    numStats = 100
    stats = moose.SpikeStats( '/stats', numStats )
    stats.vec.windowLength = 1 # timesteps to put together.
    plots = moose.Table( '/plot', numStats )
    convergence = size // numStats
    for i in range( numStats ):
        for j in range( size//numStats ):
            k = i * convergence + j
            moose.connect( network.vec[k], 'spikeOut', stats.vec[i], 'addSpike' )
    moose.connect( plots, 'requestOut', stats, 'getMean', 'OneToOne' )
    # Assign clock ticks and a common time step to all elements.
    moose.useClock( 0, '/network/syns', 'process' )
    moose.useClock( 1, '/network', 'process' )
    moose.useClock( 2, '/stats', 'process' )
    moose.useClock( 3, '/plot', 'process' )
    moose.setClock( 0, dt )
    moose.setClock( 1, dt )
    moose.setClock( 2, dt )
    moose.setClock( 3, dt )
    moose.setClock( 9, dt )
    t1 = time.time()
    moose.reinit()
    print(('reinit time t = ', time.time() - t1))
    # Re-randomize Vm after reinit so the run starts from random state.
    network.vec.Vm = nprand.rand( size ) * Vmax
    print(('setting Vm , t = ', time.time() - t1))
    t1 = time.time()
    print('starting')
    moose.start(runsteps * dt)
    print(('runtime, t = ', time.time() - t1))
    print((network.vec.Vm[99:103], network.vec.Vm[900:903]))
    # Plot the mean firing statistics of each group over time.
    t = [i * dt for i in range( plots.vec[0].vector.size )]
    i = 0
    for p in plots.vec:
        pylab.plot( t, p.vector, label=str( i) )
        i += 1
    pylab.xlabel( "Time (s)" )
    pylab.ylabel( "Vm (mV)" )
    pylab.legend()
    pylab.show()
if __name__ == '__main__':
    make_network()
| BhallaLab/moose-examples | snippets/recurrentIntFire.py | Python | gpl-2.0 | 4,293 | [
"MOOSE"
] | bfb09c3a40f5d7de658f80c772fc5773172b7e6827ad410644f01ba3b83a742e |
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2011 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
import math
import cPickle
import cocos
from cocos import euclid
import pyglet
from pyglet.gl import *
import copy
class Skin(cocos.cocosnode.CocosNode):
    """Base cocos node for renderers that draw a skeleton."""

    def __init__(self, skeleton):
        super(Skin, self).__init__()
        # Skeleton whose bone transforms are used at draw time.
        self.skeleton = skeleton
class ColorSkin(Skin):
    """Debug skin that draws every bone as a thick colored line."""

    def __init__(self, skeleton, color):
        super(ColorSkin, self).__init__(skeleton)
        # RGBA color (4 unsigned bytes) used for all bone lines.
        self.color = color

    def draw(self):
        # Update each bone's parent_matrix before reading endpoints.
        self.skeleton.propagate_matrix()
        glPushMatrix()
        self.transform()
        self.skeleton.visit_children( lambda bone: self.draw_bone( bone ) )
        # Fix: removed a dead computation that collected a label->matrix
        # dict and never used it.
        glPopMatrix()

    def draw_bone(self, bone):
        """Draw a single bone as a line from its start to its end point."""
        p1 = bone.get_start()
        p2 = bone.get_end()
        glColor4ub(*self.color)
        glLineWidth(5)
        glBegin(GL_LINES)
        glVertex2f(*p1)
        glVertex2f(*p2)
        glEnd()
class BitmapSkin(Skin):
    """Skin that textures each bone with a bitmap image part.

    skin_def is a list of tuples:
    (bone_label, (offset_x, offset_y), image_name, flip_x, flip_y, scale).
    """
    skin_parts = []

    def __init__(self, skeleton, skin_def, alpha=255):
        super(BitmapSkin, self).__init__(skeleton)
        # Global opacity applied to every blitted part.
        self.alpha = alpha
        self.skin_parts = skin_def
        self.regenerate()

    def move(self, idx, dx, dy):
        """Shift the anchor offset of part `idx` and rebuild the images."""
        sp = self.skin_parts
        pos = sp[idx][1]
        sp[idx] = sp[idx][0], (pos[0]+dx, pos[1]+dy), sp[idx][2], \
            sp[idx][3], sp[idx][4], sp[idx][5]
        self.regenerate()

    def get_control_points(self):
        # (index, bone_label) for every part, for editor UIs.
        return [ (i, p[0]) for i,p in enumerate(self.skin_parts) ]

    def regenerate(self):
        # Resolve image names into loaded pyglet images, keeping only the
        # data needed at draw time.
        self.parts = [ (name, position, scale,\
            pyglet.resource.image(image,flip_y=flip_y, flip_x=flip_x)) \
            for name, position, image, flip_x, flip_y, scale
            in self.skin_parts ]

    def draw(self):
        self.skeleton.propagate_matrix()
        glPushMatrix()
        self.transform()
        # Collect each bone's absolute transform, keyed by label.
        bones = self.skeleton.visit_children(
            lambda bone: (bone.label, bone.parent_matrix*bone.matrix))
        bones = dict(bones)
        for bname, position, scale, image in self.parts:
            matrix = bones[bname]
            self.blit_image(matrix, position, scale, image)
        glPopMatrix()

    def blit_image(self, matrix, position, scale, image):
        """Draw one textured quad transformed by the bone's matrix."""
        x, y = image.width*scale, image.height*scale
        dx, dy = position
        glEnable(image.target)
        glBindTexture(image.target, image.id)
        glPushAttrib(GL_COLOR_BUFFER_BIT)
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        # Quad corners relative to the part's anchor offset.
        points = [
            (-dx, -dy),
            (x-dx, -dy),
            (x-dx, y-dy),
            (-dx, y-dy)
        ]
        # tex_coords comes as 4 (u, v, r) triples; drop every third value.
        a,b,_,c,d,_,e,f,_,g,h,_ = image.texture.tex_coords
        textures = [ a,b,c,d,e,f,g,h ]
        # Transform the corners into world space via the bone matrix.
        np = [ matrix*euclid.Point2(*p) for p in points ]
        glColor4ub(255,255,255,self.alpha)
        glBegin(GL_QUADS)
        glTexCoord2f(a,b)
        glVertex2f(*np[0])
        glTexCoord2f(c,d)
        glVertex2f(*np[1])
        glTexCoord2f(e,f)
        glVertex2f(*np[2])
        glTexCoord2f(g,h)
        glVertex2f(*np[3])
        glEnd()
        glColor4ub(255,255,255,255)
        glPopAttrib()
        glDisable(image.target)

    def flip(self):
        """Mirror the whole skin (offsets, image flips and skeleton)."""
        nsp = []
        for name, position, image, flip_x, flip_y, scale in self.skin_parts:
            # Mirror the anchor around the image width.
            im = pyglet.resource.image(image,flip_y=flip_y, flip_x=flip_x)
            x = im.width*scale - position[0]
            y = position[1]
            nsp.append( (name, (x,y), image, not flip_x, flip_y, scale))
        self.skin_parts = nsp
        self.regenerate()
        self.skeleton = self.skeleton.flipped()
class Animate(cocos.actions.IntervalAction):
    """Interval action that poses the target's skeleton along an Animation."""

    def init(self, animation, recenter=False, recenter_x=False, recenter_y=False):
        if recenter:
            recenter_x = recenter_y = True
        self.recenter_x = recenter_x
        self.recenter_y = recenter_y
        # The action lasts as long as the animation's last keyframe.
        self.duration = animation.get_duration()
        self.animation = animation

    def start(self):
        # Snapshot the current pose; recentering moves the skeleton's
        # accumulated translation into the node position instead.
        nsk = copy.deepcopy(self.target.skeleton)
        if self.recenter_x:
            self.target.x += nsk.translation.x
            nsk.translation.x = 0
        if self.recenter_y:
            self.target.y += nsk.translation.y
            nsk.translation.y = 0
        self.start_skeleton = nsk

    def update(self, t):
        # t in [0, 1]; the animation interpolates between keyframes.
        self.animation.pose(self.target.skeleton, t, self.start_skeleton)

    def __reversed__(self):
        raise NotImplementedError("gimme some time")
class Skeleton(object):
    """Root of a bone hierarchy, carrying the whole-body translation."""

    def __init__(self, bone):
        super(Skeleton, self).__init__()
        self.bone = bone
        self.matrix = euclid.Matrix3.new_identity()
        self.translation = euclid.Vector2(0,0)

    def flipped(self):
        """Return a horizontally mirrored copy of the skeleton."""
        sk = Skeleton(self.bone.flipped())
        sk.translation.x = -self.translation.x
        sk.translation.y = self.translation.y
        sk.matrix = euclid.Matrix3.new_translate( *sk.translation )
        return sk

    def save(self, name):
        """Pickle the skeleton to the given file name."""
        f = open(name, "w")
        cPickle.dump(self, f)
        f.close()

    def move(self, dx, dy):
        # Keep both the transform matrix and the translation vector in sync.
        self.matrix.translate(dx, dy)
        self.translation.x += dx
        self.translation.y += dy

    def propagate_matrix(self):
        """Recompute every bone's parent_matrix from the root down."""
        def visit(matrix, child):
            child.parent_matrix = matrix
            matrix = matrix * child.matrix
            for c in child.children:
                visit(matrix, c)
        visit(self.matrix, self.bone)

    def visit_children(self, func):
        """Apply func to every bone (depth-first) and collect the results."""
        result = []
        def inner(bone):
            result.append( func( bone ) )
            for b in bone.children:
                inner(b)
        inner(self.bone)
        return result

    def get_control_points(self):
        # The skeleton itself is the first control point (whole-body drag),
        # followed by every bone.
        points = [self]
        self.propagate_matrix()
        points += self.visit_children( lambda bone: bone )
        return points

    def interpolated_to(self, next, delta):
        """Return a new skeleton at fraction `delta` between self and next."""
        sk = Skeleton(self.bone.interpolated_to(next.bone, delta))
        sk.translation = (next.translation-self.translation) * delta + self.translation
        sk.matrix = euclid.Matrix3.new_translate( *sk.translation )
        return sk

    def pose_from(self, other):
        """Copy another skeleton's pose into this one (bones are deep-copied)."""
        self.matrix = other.matrix
        self.translation = other.translation
        self.bone = copy.deepcopy(other.bone)
class Bone(object):
    """One bone: a rotation + translation relative to its parent, a length
    (`size`), and child bones. NOTE: this module uses Python 2 syntax
    (print statement in dump())."""

    def __init__(self, label, size, rotation, translation):
        self.size = size
        self.label = label
        self.children = []
        # Local transform: translate to the attachment point, then rotate.
        self.matrix = euclid.Matrix3.new_translate(*translation) * \
            euclid.Matrix3.new_rotate( math.radians(rotation) )
        # Filled in by Skeleton.propagate_matrix().
        self.parent_matrix = euclid.Matrix3.new_identity()
        self.translation = euclid.Point2(*translation)
        # Stored in radians; the constructor argument is in degrees.
        self.rotation = math.radians(rotation)

    def move(self, dx, dy):
        self.translation.x += dx
        self.translation.y += dy
        # Rebuild the local matrix from the updated components.
        self.matrix = euclid.Matrix3.new_translate(*self.translation) * \
            euclid.Matrix3.new_rotate( self.rotation)

    def flipped(self):
        """Return a mirrored copy of this bone and all its children."""
        bone = Bone(self.label, self.size, -math.degrees(self.rotation),
                    (-self.translation[0], self.translation[1]))
        for b in self.children:
            bone.add( b.flipped() )
        return bone

    def rotate(self, angle):
        # `angle` is in radians here, unlike the constructor.
        self.rotation += angle
        self.matrix.rotate( angle )

    def add(self, bone):
        """Append a child bone; returns self for chaining."""
        self.children.append(bone)
        return self

    def get_end(self):
        # Bones extend along the local -y axis by `size`.
        return self.parent_matrix * self.matrix * euclid.Point2(0, -self.size)

    def get_start(self):
        return self.parent_matrix * self.matrix * euclid.Point2(0, 0)

    def interpolated_to(self, next, delta):
        """Return a copy at fraction `delta` between self and next,
        rotating along the shortest angular path."""
        ea = next.rotation%(math.pi*2)
        sa = self.rotation %(math.pi*2)
        # Signed shortest difference, wrapped into (-pi, pi].
        angle = ((ea%(math.pi*2)) - (sa%(math.pi*2)))
        if angle > math.pi:
            angle = -math.pi*2+angle
        if angle < -math.pi:
            angle = math.pi*2+angle
        nr = ( sa + angle * delta ) % (math.pi*2)
        nr = math.degrees( nr )
        bone = Bone(self.label, self.size, nr, self.translation)
        # Assumes next has the same tree shape as self -- TODO confirm.
        for i, c in enumerate(self.children):
            nc = c.interpolated_to(next.children[i], delta)
            bone.add( nc )
        return bone

    def dump(self, depth=0):
        # Debug print of the subtree (Python 2 print statement).
        print "-"*depth, self
        for c in self.children:
            c.dump(depth+1)

    def repr(self, depth=0):
        """Return Python source that reconstructs this subtree."""
        repr = " "*depth*4 + "Bone('%s', %s, %s, %s)"%(
            self.label, self.size, math.degrees(self.rotation), self.translation
        )
        for c in self.children:
            repr += " "*depth*4 +".add(\n" + c.repr(depth+1) + ")"
        repr += "\n"
        return repr
class Animation(object):
    """A timeline of skeleton keyframes with a movable cursor (`position`).

    `frames` maps time -> Skeleton keyframe; `position` is the edit/play
    cursor in the same time units.
    """

    def __init__(self, skeleton):
        self.frames = {}
        self.position = 0
        # Template skeleton used when inserting the very first keyframe.
        self.skeleton = skeleton

    def flipped(self):
        """Return a deep copy with every keyframe skeleton mirrored."""
        c = copy.deepcopy(self)
        for t, sk in c.frames.items():
            c.frames[t] = sk.flipped()
        return c

    def pose(self, who, t, start):
        """Pose skeleton `who` at normalized time t in [0, 1]; `start` is
        the pose to interpolate from before the first keyframe."""
        dt = t * self.get_duration()
        self.position = dt
        ct, curr = self.get_keyframe()
        # if we are in a keyframe, pose that
        if curr:
            who.pose_from( curr )
            return
        # find previous, if not, use start
        pt, prev = self.get_keyframe(-1)
        if not prev:
            prev = start
            pt = 0
        # find next, if not, pose at prev
        nt, next = self.get_keyframe(1)
        if not next:
            who.pose_from( prev )
            return
        # we find the dt betwen prev and next and pose from it
        ft = (nt-dt)/(nt-pt)
        who.pose_from( next.interpolated_to( prev, ft ) )

    def get_duration(self):
        # Duration covers both the last keyframe and the current cursor.
        if self.frames:
            return max(max( self.frames ), self.position )
        else:
            return self.position

    def get_markers(self):
        return self.frames.keys()

    def get_position(self):
        return self.position

    def get_keyframe(self, offset=0):
        """Return (time, skeleton) of the keyframe at the cursor
        (offset=0), or the |offset|-th one before (<0) / after (>0);
        (None, None) when there is no such keyframe."""
        if offset == 0:
            if self.position in self.frames:
                return self.position, self.frames[self.position]
            else:
                return None, None
        elif offset < 0:
            prevs = [ t for t in self.frames if t < self.position ]
            prevs.sort()
            if abs(offset) <= len(prevs):
                # Negative indexing walks back from the cursor.
                return prevs[offset], self.frames[prevs[offset]]
            else:
                return None, None
        elif offset > 0:
            next = [ t for t in self.frames if t > self.position ]
            next.sort()
            if abs(offset) <= len(next):
                return next[offset-1], self.frames[next[offset-1]]
            else:
                return None, None

    def next_keyframe(self):
        """Move the cursor to the following keyframe; False at the end."""
        next = [ t for t in self.frames if t > self.position ]
        if not next:
            return False
        self.position = min(next)
        return True

    def prev_keyframe(self):
        """Move the cursor to the preceding keyframe; False at the start."""
        prevs = [ t for t in self.frames if t < self.position ]
        if not prevs:
            return False
        self.position = max(prevs)
        return True

    def move_position(self, delta):
        # The cursor is clamped at 0 on the left.
        self.position = max(self.position+delta, 0)
        return True

    def move_start(self):
        self.position = 0
        return True

    def move_end(self):
        if self.frames:
            self.position = max( self.frames )
        else:
            self.position = 0
        return True

    def insert_keyframe(self):
        """Snapshot a keyframe at the cursor (copying the previous
        keyframe, or the template skeleton for the first one)."""
        if self.position not in self.frames:
            t, sk = self.get_keyframe(-1)
            if not sk:
                sk = self.skeleton
            self.frames[ self.position ] = copy.deepcopy(sk)
            return True
        return False

    def remove_keyframe(self):
        if self.position in self.frames:
            del self.frames[ self.position ]
            return True
        return False

    def insert_time(self, delta):
        """Shift every keyframe at/after the cursor right by delta."""
        new_frames = {}
        for t, sk in sorted(self.frames.items()):
            if t >= self.position:
                t += delta
            new_frames[ t ] = sk
        self.frames = new_frames

    def delete_time(self, delta):
        """Shift keyframes after the cursor left by delta; refuses (returns
        False) when a keyframe lies inside the deleted interval."""
        for t in self.frames:
            if self.position <= t < self.position + delta:
                return False
        new_frames = {}
        for t, sk in sorted(self.frames.items()):
            if t > self.position:
                t -= delta
            new_frames[ t ] = sk
        self.frames = new_frames
| eevee/cocos2d-mirror | cocos/skeleton.py | Python | bsd-3-clause | 14,706 | [
"VisIt"
] | 61a97ee9c4a71cf4c4a401621b2f8957c6b99f6ff69a3b560598070752c749ba |
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for the contents of device_utils.py (mostly DeviceUtils).
"""
# pylint: disable=protected-access
# pylint: disable=unused-argument
import json
import logging
import os
import stat
import unittest
from devil import devil_env
from devil.android import device_errors
from devil.android import device_signal
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.android.sdk import intent
from devil.android.sdk import keyevent
from devil.android.sdk import version_codes
from devil.utils import cmd_helper
from devil.utils import mock_calls
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock # pylint: disable=import-error
class AnyStringWith(object):
    """Matcher that compares equal to any string containing `value`.

    Used as an argument matcher in mock assertions.
    """

    def __init__(self, value):
        self._value = value

    def __eq__(self, other):
        return self._value in other

    def __ne__(self, other):
        # Fix: Python 2 does not derive != from __eq__, so without this
        # `x != AnyStringWith(...)` would fall back to identity comparison.
        return not self.__eq__(other)

    def __repr__(self):
        return '<AnyStringWith: %s>' % self._value
class _MockApkHelper(object):
def __init__(self, path, package_name, perms=None):
self.path = path
self.package_name = package_name
self.perms = perms
def GetPackageName(self):
return self.package_name
def GetPermissions(self):
return self.perms
class _MockMultipleDevicesError(Exception):
    """Test stand-in for the multiple-devices error type."""
    pass
class DeviceUtilsInitTest(unittest.TestCase):
    """Tests for the DeviceUtils constructor's accepted argument types."""

    def testInitWithStr(self):
        serial_as_str = str('0123456789abcdef')
        d = device_utils.DeviceUtils('0123456789abcdef')
        self.assertEqual(serial_as_str, d.adb.GetDeviceSerial())

    def testInitWithUnicode(self):
        # Python 2 only: `unicode` builtin.
        serial_as_unicode = unicode('fedcba9876543210')
        d = device_utils.DeviceUtils(serial_as_unicode)
        self.assertEqual(serial_as_unicode, d.adb.GetDeviceSerial())

    def testInitWithAdbWrapper(self):
        # An existing AdbWrapper is used as-is instead of a serial string.
        serial = '123456789abcdef0'
        a = adb_wrapper.AdbWrapper(serial)
        d = device_utils.DeviceUtils(a)
        self.assertEqual(serial, d.adb.GetDeviceSerial())

    def testInitWithMissing_fails(self):
        # None and the empty string are rejected.
        with self.assertRaises(ValueError):
            device_utils.DeviceUtils(None)
        with self.assertRaises(ValueError):
            device_utils.DeviceUtils('')
class DeviceUtilsGetAVDsTest(mock_calls.TestCase):
  """Tests that GetAVDs parses `android list avd` output into AVD names."""

  def testGetAVDs(self):
    # Fake the SDK location lookup done through devil_env.
    mocked_attrs = {
      'android_sdk': '/my/sdk/path'
    }
    with mock.patch('devil.devil_env._Environment.LocalPath',
                    mock.Mock(side_effect=lambda a: mocked_attrs[a])):
      with self.assertCall(
          mock.call.devil.utils.cmd_helper.GetCmdOutput(
              [mock.ANY, 'list', 'avd']),
          'Available Android Virtual Devices:\n'
          ' Name: my_android5.0\n'
          ' Path: /some/path/to/.android/avd/my_android5.0.avd\n'
          ' Target: Android 5.0 (API level 21)\n'
          ' Tag/ABI: default/x86\n'
          ' Skin: WVGA800\n'):
        self.assertEquals(['my_android5.0'], device_utils.GetAVDs())
class DeviceUtilsRestartServerTest(mock_calls.TestCase):
  """Tests that RestartServer kills adb, then polls pgrep until it is back."""

  @mock.patch('time.sleep', mock.Mock())
  def testRestartServer_succeeds(self):
    with self.assertCalls(
        mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.KillServer(),
        # pgrep returns 1 (no process) until the server is up again.
        (mock.call.devil.utils.cmd_helper.GetCmdStatusAndOutput(
            ['pgrep', 'adb']),
         (1, '')),
        mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.StartServer(),
        (mock.call.devil.utils.cmd_helper.GetCmdStatusAndOutput(
            ['pgrep', 'adb']),
         (1, '')),
        (mock.call.devil.utils.cmd_helper.GetCmdStatusAndOutput(
            ['pgrep', 'adb']),
         (0, '123\n'))):
      device_utils.RestartServer()
class MockTempFile(object):
  """Context-manager stand-in for a device temp file, backed by a MagicMock."""

  def __init__(self, name='/tmp/some/file'):
    # NOTE: `file` is the Python 2 builtin file type used as the mock spec.
    self.file = mock.MagicMock(spec=file)
    self.file.name = name
    self.file.name_quoted = cmd_helper.SingleQuote(name)

  def __enter__(self):
    return self.file

  def __exit__(self, exc_type, exc_val, exc_tb):
    pass

  @property
  def name(self):
    return self.file.name
class _PatchedFunction(object):
def __init__(self, patched=None, mocked=None):
self.patched = patched
self.mocked = mocked
def _AdbWrapperMock(test_serial, is_ready=True):
  """Builds an AdbWrapper mock that reports the given serial and ready state."""
  mock_adb = mock.Mock(spec=adb_wrapper.AdbWrapper)
  mock_adb.is_ready = is_ready
  # str() of the mock and GetDeviceSerial() both yield the serial.
  mock_adb.GetDeviceSerial.return_value = test_serial
  mock_adb.__str__ = mock.Mock(return_value=test_serial)
  return mock_adb
class DeviceUtilsTest(mock_calls.TestCase):
  """Base fixture: a DeviceUtils wired to a mocked AdbWrapper.

  Subclasses script expected adb interactions with assertCall/assertCalls.
  The helpers below build mock side effects for common failure modes.
  """

  def setUp(self):
    self.adb = _AdbWrapperMock('0123456789abcdef')
    # No retries so that scripted call sequences match exactly.
    self.device = device_utils.DeviceUtils(
        self.adb, default_timeout=10, default_retries=0)
    self.watchMethodCalls(self.call.adb, ignore=['GetDeviceSerial'])

  def AdbCommandError(self, args=None, output=None, status=None, msg=None):
    # Mock action raising AdbCommandFailedError when the scripted call runs.
    if args is None:
      args = ['[unspecified]']
    return mock.Mock(side_effect=device_errors.AdbCommandFailedError(
        args, output, status, msg, str(self.device)))

  def CommandError(self, msg=None):
    # Mock action raising a generic CommandFailedError.
    if msg is None:
      msg = 'Command failed'
    return mock.Mock(side_effect=device_errors.CommandFailedError(
        msg, str(self.device)))

  def ShellError(self, output=None, status=1):
    # Returns a callable raising AdbShellCommandFailedError with the given
    # output and exit status; `output` is resolved when the action is called.
    def action(cmd, *args, **kwargs):
      raise device_errors.AdbShellCommandFailedError(
          cmd, output, status, str(self.device))
    if output is None:
      output = 'Permission denied\n'
    return action

  def TimeoutError(self, msg=None):
    # Mock action raising CommandTimeoutError.
    if msg is None:
      msg = 'Operation timed out'
    return mock.Mock(side_effect=device_errors.CommandTimeoutError(
        msg, str(self.device)))

  def EnsureCacheInitialized(self, props=None, sdcard='/sdcard'):
    # Expected (call, return) pair for the cache-priming getprop command.
    props = props or []
    ret = [sdcard, 'TOKEN'] + props
    return (self.call.device.RunShellCommand(
        AnyStringWith('getprop'),
        shell=True, check_return=True, large_output=True), ret)
class DeviceUtilsEqTest(DeviceUtilsTest):
  """Tests DeviceUtils equality against DeviceUtils, AdbWrapper, and str."""

  def testEq_equal_deviceUtils(self):
    other = device_utils.DeviceUtils(_AdbWrapperMock('0123456789abcdef'))
    self.assertTrue(self.device == other)
    self.assertTrue(other == self.device)

  def testEq_equal_adbWrapper(self):
    other = adb_wrapper.AdbWrapper('0123456789abcdef')
    self.assertTrue(self.device == other)
    self.assertTrue(other == self.device)

  def testEq_equal_string(self):
    other = '0123456789abcdef'
    self.assertTrue(self.device == other)
    self.assertTrue(other == self.device)

  def testEq_devicesNotEqual(self):
    other = device_utils.DeviceUtils(_AdbWrapperMock('0123456789abcdee'))
    self.assertFalse(self.device == other)
    self.assertFalse(other == self.device)

  def testEq_identity(self):
    self.assertTrue(self.device == self.device)

  def testEq_serialInList(self):
    # Equality with strings makes `in` work on lists of devices.
    devices = [self.device]
    self.assertTrue('0123456789abcdef' in devices)
class DeviceUtilsLtTest(DeviceUtilsTest):
  """Tests DeviceUtils ordering (by serial) and sorting."""

  def testLt_lessThan(self):
    other = device_utils.DeviceUtils(_AdbWrapperMock('ffffffffffffffff'))
    self.assertTrue(self.device < other)
    self.assertTrue(other > self.device)

  def testLt_greaterThan_lhs(self):
    other = device_utils.DeviceUtils(_AdbWrapperMock('0000000000000000'))
    self.assertFalse(self.device < other)
    self.assertFalse(other > self.device)

  def testLt_equal(self):
    other = device_utils.DeviceUtils(_AdbWrapperMock('0123456789abcdef'))
    self.assertFalse(self.device < other)
    self.assertFalse(other > self.device)

  def testLt_sorted(self):
    devices = [
        device_utils.DeviceUtils(_AdbWrapperMock('ffffffffffffffff')),
        device_utils.DeviceUtils(_AdbWrapperMock('0000000000000000')),
    ]
    sorted_devices = sorted(devices)
    self.assertEquals('0000000000000000',
                      sorted_devices[0].adb.GetDeviceSerial())
    self.assertEquals('ffffffffffffffff',
                      sorted_devices[1].adb.GetDeviceSerial())
class DeviceUtilsStrTest(DeviceUtilsTest):
  """Tests that str(DeviceUtils) is the device serial."""

  def testStr_returnsSerial(self):
    with self.assertCalls(
        (self.call.adb.GetDeviceSerial(), '0123456789abcdef')):
      self.assertEqual('0123456789abcdef', str(self.device))
class DeviceUtilsIsOnlineTest(DeviceUtilsTest):
  """Tests IsOnline: true for 'device' state, false otherwise or on error."""

  def testIsOnline_true(self):
    with self.assertCall(self.call.adb.GetState(), 'device'):
      self.assertTrue(self.device.IsOnline())

  def testIsOnline_false(self):
    with self.assertCall(self.call.adb.GetState(), 'offline'):
      self.assertFalse(self.device.IsOnline())

  def testIsOnline_error(self):
    # A failing GetState is treated as offline, not raised.
    with self.assertCall(self.call.adb.GetState(), self.CommandError()):
      self.assertFalse(self.device.IsOnline())
class DeviceUtilsHasRootTest(DeviceUtilsTest):
  """Tests HasRoot for regular devices (ls /root) and sailfish devices
  (getprop service.adb.root)."""

  def testHasRoot_true(self):
    with self.patch_call(self.call.device.product_name,
                         return_value='notasailfish'), (
        self.assertCall(self.call.adb.Shell('ls /root'), 'foo\n')):
      self.assertTrue(self.device.HasRoot())

  # Renamed from testhasRootSpecial_true for consistent camelCase naming
  # with testHasRootSpecial_false below.
  def testHasRootSpecial_true(self):
    with self.patch_call(self.call.device.product_name,
                         return_value='sailfish'), (
        self.assertCall(self.call.adb.Shell('getprop service.adb.root'),
                        '1\n')):
      self.assertTrue(self.device.HasRoot())

  def testHasRoot_false(self):
    with self.patch_call(self.call.device.product_name,
                         return_value='notasailfish'), (
        self.assertCall(self.call.adb.Shell('ls /root'),
                        self.ShellError())):
      self.assertFalse(self.device.HasRoot())

  def testHasRootSpecial_false(self):
    with self.patch_call(self.call.device.product_name,
                         return_value='sailfish'), (
        self.assertCall(self.call.adb.Shell('getprop service.adb.root'),
                        '\n')):
      self.assertFalse(self.device.HasRoot())
class DeviceUtilsEnableRootTest(DeviceUtilsTest):
  """Tests EnableRoot success and the user-build / root-failure paths."""

  def testEnableRoot_succeeds(self):
    with self.assertCalls(
        self.call.adb.Root(),
        self.call.adb.WaitForDevice(),
        (self.call.device.GetProp('service.adb.root', cache=False), '1')):
      self.device.EnableRoot()

  def testEnableRoot_userBuild(self):
    # Root() failing on a user build surfaces as CommandFailedError.
    with self.assertCalls(
        (self.call.adb.Root(), self.AdbCommandError()),
        (self.call.device.IsUserBuild(), True)):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device.EnableRoot()

  def testEnableRoot_rootFails(self):
    # On non-user builds the original adb failure is re-raised.
    with self.assertCalls(
        (self.call.adb.Root(), self.AdbCommandError()),
        (self.call.device.IsUserBuild(), False)):
      with self.assertRaises(device_errors.AdbCommandFailedError):
        self.device.EnableRoot()
class DeviceUtilsIsUserBuildTest(DeviceUtilsTest):
  """Tests IsUserBuild against the cached ro.build.type property."""

  def testIsUserBuild_yes(self):
    with self.assertCall(
        self.call.device.GetProp('ro.build.type', cache=True), 'user'):
      self.assertTrue(self.device.IsUserBuild())

  def testIsUserBuild_no(self):
    with self.assertCall(
        self.call.device.GetProp('ro.build.type', cache=True), 'userdebug'):
      self.assertFalse(self.device.IsUserBuild())
class DeviceUtilsGetExternalStoragePathTest(DeviceUtilsTest):
  """Tests GetExternalStoragePath via the cache-priming command."""

  def testGetExternalStoragePath_succeeds(self):
    with self.assertCalls(
        self.EnsureCacheInitialized(sdcard='/fake/storage/path')):
      self.assertEquals('/fake/storage/path',
                        self.device.GetExternalStoragePath())

  def testGetExternalStoragePath_fails(self):
    # An empty sdcard path from the cache command is an error.
    with self.assertCalls(
        self.EnsureCacheInitialized(sdcard='')):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device.GetExternalStoragePath()
class DeviceUtilsGetApplicationPathsInternalTest(DeviceUtilsTest):
  """Tests _GetApplicationPathsInternal parsing of `pm path` output."""

  def testGetApplicationPathsInternal_exists(self):
    with self.assertCalls(
        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '19'),
        (self.call.device.RunShellCommand(
            ['pm', 'path', 'android'], check_return=True),
         ['package:/path/to/android.apk'])):
      self.assertEquals(['/path/to/android.apk'],
                        self.device._GetApplicationPathsInternal('android'))

  def testGetApplicationPathsInternal_notExists(self):
    with self.assertCalls(
        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '19'),
        (self.call.device.RunShellCommand(
            ['pm', 'path', 'not.installed.app'], check_return=True),
         '')):
      self.assertEquals([],
          self.device._GetApplicationPathsInternal('not.installed.app'))

  def testGetApplicationPathsInternal_garbageFirstLine(self):
    # Output lines not starting with 'package:' are rejected.
    with self.assertCalls(
        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '19'),
        (self.call.device.RunShellCommand(
            ['pm', 'path', 'android'], check_return=True),
         ['garbage first line'])):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device._GetApplicationPathsInternal('android')

  def testGetApplicationPathsInternal_fails(self):
    with self.assertCalls(
        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '19'),
        (self.call.device.RunShellCommand(
            ['pm', 'path', 'android'], check_return=True),
         self.CommandError('ERROR. Is package manager running?\n'))):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device._GetApplicationPathsInternal('android')
class DeviceUtils_GetApplicationVersionTest(DeviceUtilsTest):
  """Tests GetApplicationVersion parsing of `dumpsys package` output."""

  def test_GetApplicationVersion_exists(self):
    with self.assertCalls(
        (self.call.adb.Shell('dumpsys package com.android.chrome'),
         'Packages:\n'
         ' Package [com.android.chrome] (3901ecfb):\n'
         ' userId=1234 gids=[123, 456, 789]\n'
         ' pkg=Package{1fecf634 com.android.chrome}\n'
         ' versionName=45.0.1234.7\n')):
      self.assertEquals('45.0.1234.7',
                        self.device.GetApplicationVersion('com.android.chrome'))

  def test_GetApplicationVersion_notExists(self):
    # Empty dumpsys output means the package is not installed -> None.
    with self.assertCalls(
        (self.call.adb.Shell('dumpsys package com.android.chrome'), '')):
      self.assertEquals(None,
                        self.device.GetApplicationVersion('com.android.chrome'))

  def test_GetApplicationVersion_fails(self):
    # Package info present but without versionName is an error.
    with self.assertCalls(
        (self.call.adb.Shell('dumpsys package com.android.chrome'),
         'Packages:\n'
         ' Package [com.android.chrome] (3901ecfb):\n'
         ' userId=1234 gids=[123, 456, 789]\n'
         ' pkg=Package{1fecf634 com.android.chrome}\n')):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device.GetApplicationVersion('com.android.chrome')
class DeviceUtilsGetApplicationDataDirectoryTest(DeviceUtilsTest):
  """Tests GetApplicationDataDirectory via `pm dump | grep dataDir=`."""

  def testGetApplicationDataDirectory_exists(self):
    with self.assertCall(
        self.call.device._RunPipedShellCommand(
            'pm dump foo.bar.baz | grep dataDir='),
        ['dataDir=/data/data/foo.bar.baz']):
      self.assertEquals(
          '/data/data/foo.bar.baz',
          self.device.GetApplicationDataDirectory('foo.bar.baz'))

  def testGetApplicationDataDirectory_notExists(self):
    with self.assertCall(
        self.call.device._RunPipedShellCommand(
            'pm dump foo.bar.baz | grep dataDir='),
        self.ShellError()):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device.GetApplicationDataDirectory('foo.bar.baz')
@mock.patch('time.sleep', mock.Mock())
class DeviceUtilsWaitUntilFullyBootedTest(DeviceUtilsTest):
  """Tests WaitUntilFullyBooted's polling of sd card, pm, boot and wifi.

  Each test scripts the exact sequence of checks (sd_card_ready, pm_ready,
  boot_completed, optionally wifi_enabled) including retries on failure.
  time.sleep is patched out so retries run instantly.
  """

  def testWaitUntilFullyBooted_succeedsNoWifi(self):
    with self.assertCalls(
        self.call.adb.WaitForDevice(),
        # sd_card_ready
        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
        (self.call.adb.Shell('test -d /fake/storage/path'), ''),
        # pm_ready
        (self.call.device._GetApplicationPathsInternal('android',
                                                       skip_cache=True),
         ['package:/some/fake/path']),
        # boot_completed
        (self.call.device.GetProp('sys.boot_completed', cache=False), '1')):
      self.device.WaitUntilFullyBooted(wifi=False)

  def testWaitUntilFullyBooted_succeedsWithWifi(self):
    with self.assertCalls(
        self.call.adb.WaitForDevice(),
        # sd_card_ready
        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
        (self.call.adb.Shell('test -d /fake/storage/path'), ''),
        # pm_ready
        (self.call.device._GetApplicationPathsInternal('android',
                                                       skip_cache=True),
         ['package:/some/fake/path']),
        # boot_completed
        (self.call.device.GetProp('sys.boot_completed', cache=False), '1'),
        # wifi_enabled
        (self.call.adb.Shell('dumpsys wifi'),
         'stuff\nWi-Fi is enabled\nmore stuff\n')):
      self.device.WaitUntilFullyBooted(wifi=True)

  def testWaitUntilFullyBooted_deviceNotInitiallyAvailable(self):
    # The sd-card check is retried while the device is still coming up.
    with self.assertCalls(
        self.call.adb.WaitForDevice(),
        # sd_card_ready
        (self.call.device.GetExternalStoragePath(), self.AdbCommandError()),
        # sd_card_ready
        (self.call.device.GetExternalStoragePath(), self.AdbCommandError()),
        # sd_card_ready
        (self.call.device.GetExternalStoragePath(), self.AdbCommandError()),
        # sd_card_ready
        (self.call.device.GetExternalStoragePath(), self.AdbCommandError()),
        # sd_card_ready
        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
        (self.call.adb.Shell('test -d /fake/storage/path'), ''),
        # pm_ready
        (self.call.device._GetApplicationPathsInternal('android',
                                                       skip_cache=True),
         ['package:/some/fake/path']),
        # boot_completed
        (self.call.device.GetProp('sys.boot_completed', cache=False), '1')):
      self.device.WaitUntilFullyBooted(wifi=False)

  def testWaitUntilFullyBooted_deviceBrieflyOffline(self):
    # A transient adb failure during the boot_completed check is retried.
    with self.assertCalls(
        self.call.adb.WaitForDevice(),
        # sd_card_ready
        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
        (self.call.adb.Shell('test -d /fake/storage/path'), ''),
        # pm_ready
        (self.call.device._GetApplicationPathsInternal('android',
                                                       skip_cache=True),
         ['package:/some/fake/path']),
        # boot_completed
        (self.call.device.GetProp('sys.boot_completed', cache=False),
         self.AdbCommandError()),
        # boot_completed
        (self.call.device.GetProp('sys.boot_completed', cache=False), '1')):
      self.device.WaitUntilFullyBooted(wifi=False)

  def testWaitUntilFullyBooted_sdCardReadyFails_noPath(self):
    # A CommandError (vs AdbCommandError) from the path lookup is fatal.
    with self.assertCalls(
        self.call.adb.WaitForDevice(),
        # sd_card_ready
        (self.call.device.GetExternalStoragePath(), self.CommandError())):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device.WaitUntilFullyBooted(wifi=False)

  def testWaitUntilFullyBooted_sdCardReadyFails_notExists(self):
    with self.assertCalls(
        self.call.adb.WaitForDevice(),
        # sd_card_ready
        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
        (self.call.adb.Shell('test -d /fake/storage/path'), self.ShellError()),
        # sd_card_ready
        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
        (self.call.adb.Shell('test -d /fake/storage/path'), self.ShellError()),
        # sd_card_ready
        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
        (self.call.adb.Shell('test -d /fake/storage/path'),
         self.TimeoutError())):
      with self.assertRaises(device_errors.CommandTimeoutError):
        self.device.WaitUntilFullyBooted(wifi=False)

  def testWaitUntilFullyBooted_devicePmFails(self):
    with self.assertCalls(
        self.call.adb.WaitForDevice(),
        # sd_card_ready
        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
        (self.call.adb.Shell('test -d /fake/storage/path'), ''),
        # pm_ready
        (self.call.device._GetApplicationPathsInternal('android',
                                                       skip_cache=True),
         self.CommandError()),
        # pm_ready
        (self.call.device._GetApplicationPathsInternal('android',
                                                       skip_cache=True),
         self.CommandError()),
        # pm_ready
        (self.call.device._GetApplicationPathsInternal('android',
                                                       skip_cache=True),
         self.TimeoutError())):
      with self.assertRaises(device_errors.CommandTimeoutError):
        self.device.WaitUntilFullyBooted(wifi=False)

  def testWaitUntilFullyBooted_bootFails(self):
    with self.assertCalls(
        self.call.adb.WaitForDevice(),
        # sd_card_ready
        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
        (self.call.adb.Shell('test -d /fake/storage/path'), ''),
        # pm_ready
        (self.call.device._GetApplicationPathsInternal('android',
                                                       skip_cache=True),
         ['package:/some/fake/path']),
        # boot_completed
        (self.call.device.GetProp('sys.boot_completed', cache=False), '0'),
        # boot_completed
        (self.call.device.GetProp('sys.boot_completed', cache=False), '0'),
        # boot_completed
        (self.call.device.GetProp('sys.boot_completed', cache=False),
         self.TimeoutError())):
      with self.assertRaises(device_errors.CommandTimeoutError):
        self.device.WaitUntilFullyBooted(wifi=False)

  def testWaitUntilFullyBooted_wifiFails(self):
    with self.assertCalls(
        self.call.adb.WaitForDevice(),
        # sd_card_ready
        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
        (self.call.adb.Shell('test -d /fake/storage/path'), ''),
        # pm_ready
        (self.call.device._GetApplicationPathsInternal('android',
                                                       skip_cache=True),
         ['package:/some/fake/path']),
        # boot_completed
        (self.call.device.GetProp('sys.boot_completed', cache=False), '1'),
        # wifi_enabled
        (self.call.adb.Shell('dumpsys wifi'), 'stuff\nmore stuff\n'),
        # wifi_enabled
        (self.call.adb.Shell('dumpsys wifi'), 'stuff\nmore stuff\n'),
        # wifi_enabled
        (self.call.adb.Shell('dumpsys wifi'), self.TimeoutError())):
      with self.assertRaises(device_errors.CommandTimeoutError):
        self.device.WaitUntilFullyBooted(wifi=True)
@mock.patch('time.sleep', mock.Mock())
class DeviceUtilsRebootTest(DeviceUtilsTest):
  """Tests Reboot: waits for the device to go offline, optionally blocks."""

  def testReboot_nonBlocking(self):
    with self.assertCalls(
        self.call.adb.Reboot(),
        (self.call.device.IsOnline(), True),
        (self.call.device.IsOnline(), False)):
      self.device.Reboot(block=False)

  def testReboot_blocking(self):
    with self.assertCalls(
        self.call.adb.Reboot(),
        (self.call.device.IsOnline(), True),
        (self.call.device.IsOnline(), False),
        self.call.device.WaitUntilFullyBooted(wifi=False)):
      self.device.Reboot(block=True)

  def testReboot_blockUntilWifi(self):
    with self.assertCalls(
        self.call.adb.Reboot(),
        (self.call.device.IsOnline(), True),
        (self.call.device.IsOnline(), False),
        self.call.device.WaitUntilFullyBooted(wifi=True)):
      self.device.Reboot(block=True, wifi=True)
class DeviceUtilsInstallTest(DeviceUtilsTest):
  """Tests Install: fresh installs, reinstalls, permissions, downgrades."""

  # Shared fake APK granting permission 'p1'.
  mock_apk = _MockApkHelper('/fake/test/app.apk', 'test.package', ['p1'])

  def testInstall_noPriorInstall(self):
    # On M+ (sdk 23) permissions from the apk are granted after install.
    with self.patch_call(self.call.device.build_version_sdk, return_value=23):
      with self.assertCalls(
          (mock.call.os.path.exists('/fake/test/app.apk'), True),
          (self.call.device._GetApplicationPathsInternal('test.package'), []),
          self.call.adb.Install('/fake/test/app.apk', reinstall=False,
                                allow_downgrade=False),
          (self.call.device.GrantPermissions('test.package', ['p1']), [])):
        self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0)

  def testInstall_permissionsPreM(self):
    # Before M (sdk 20) no runtime permission granting happens.
    with self.patch_call(self.call.device.build_version_sdk, return_value=20):
      with self.assertCalls(
          (mock.call.os.path.exists('/fake/test/app.apk'), True),
          (self.call.device._GetApplicationPathsInternal('test.package'), []),
          (self.call.adb.Install('/fake/test/app.apk', reinstall=False,
                                 allow_downgrade=False))):
        self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0)

  def testInstall_findPermissions(self):
    with self.patch_call(self.call.device.build_version_sdk, return_value=23):
      with self.assertCalls(
          (mock.call.os.path.exists('/fake/test/app.apk'), True),
          (self.call.device._GetApplicationPathsInternal('test.package'), []),
          (self.call.adb.Install('/fake/test/app.apk', reinstall=False,
                                 allow_downgrade=False)),
          (self.call.device.GrantPermissions('test.package', ['p1']), [])):
        self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0)

  def testInstall_passPermissions(self):
    # Explicit permissions= overrides the apk's own permission list.
    with self.assertCalls(
        (mock.call.os.path.exists('/fake/test/app.apk'), True),
        (self.call.device._GetApplicationPathsInternal('test.package'), []),
        (self.call.adb.Install('/fake/test/app.apk', reinstall=False,
                               allow_downgrade=False)),
        (self.call.device.GrantPermissions('test.package', ['p1', 'p2']), [])):
      self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0,
                          permissions=['p1', 'p2'])

  def testInstall_differentPriorInstall(self):
    # A stale prior install without reinstall= forces uninstall first.
    with self.assertCalls(
        (mock.call.os.path.exists('/fake/test/app.apk'), True),
        (self.call.device._GetApplicationPathsInternal('test.package'),
         ['/fake/data/app/test.package.apk']),
        (self.call.device._ComputeStaleApks('test.package',
                                            ['/fake/test/app.apk']),
         (['/fake/test/app.apk'], None)),
        self.call.device.Uninstall('test.package'),
        self.call.adb.Install('/fake/test/app.apk', reinstall=False,
                              allow_downgrade=False)):
      self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0,
                          permissions=[])

  def testInstall_differentPriorInstall_reinstall(self):
    with self.assertCalls(
        (mock.call.os.path.exists('/fake/test/app.apk'), True),
        (self.call.device._GetApplicationPathsInternal('test.package'),
         ['/fake/data/app/test.package.apk']),
        (self.call.device._ComputeStaleApks('test.package',
                                            ['/fake/test/app.apk']),
         (['/fake/test/app.apk'], None)),
        self.call.adb.Install('/fake/test/app.apk', reinstall=True,
                              allow_downgrade=False)):
      self.device.Install(DeviceUtilsInstallTest.mock_apk,
                          reinstall=True, retries=0, permissions=[])

  def testInstall_identicalPriorInstall_reinstall(self):
    # Nothing stale: skip the install and just force-stop the app.
    with self.assertCalls(
        (mock.call.os.path.exists('/fake/test/app.apk'), True),
        (self.call.device._GetApplicationPathsInternal('test.package'),
         ['/fake/data/app/test.package.apk']),
        (self.call.device._ComputeStaleApks('test.package',
                                            ['/fake/test/app.apk']),
         ([], None)),
        (self.call.device.ForceStop('test.package'))):
      self.device.Install(DeviceUtilsInstallTest.mock_apk,
                          reinstall=True, retries=0, permissions=[])

  def testInstall_missingApk(self):
    with self.assertCalls(
        (mock.call.os.path.exists('/fake/test/app.apk'), False)):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0)

  def testInstall_fails(self):
    with self.assertCalls(
        (mock.call.os.path.exists('/fake/test/app.apk'), True),
        (self.call.device._GetApplicationPathsInternal('test.package'), []),
        (self.call.adb.Install('/fake/test/app.apk', reinstall=False,
                               allow_downgrade=False),
         self.CommandError('Failure\r\n'))):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0)

  def testInstall_downgrade(self):
    with self.assertCalls(
        (mock.call.os.path.exists('/fake/test/app.apk'), True),
        (self.call.device._GetApplicationPathsInternal('test.package'),
         ['/fake/data/app/test.package.apk']),
        (self.call.device._ComputeStaleApks('test.package',
                                            ['/fake/test/app.apk']),
         (['/fake/test/app.apk'], None)),
        self.call.adb.Install('/fake/test/app.apk', reinstall=True,
                              allow_downgrade=True)):
      self.device.Install(DeviceUtilsInstallTest.mock_apk,
                          reinstall=True, retries=0, permissions=[],
                          allow_downgrade=True)
class DeviceUtilsInstallSplitApkTest(DeviceUtilsTest):
  """Tests InstallSplitApk: split selection, partial installs, downgrades."""

  # Shared fake base apk granting permission 'p1'.
  mock_apk = _MockApkHelper('base.apk', 'test.package', ['p1'])

  def testInstallSplitApk_noPriorInstall(self):
    with self.assertCalls(
        # Split-apk install requires Lollipop (sdk 21).
        (self.call.device._CheckSdkLevel(21)),
        (mock.call.devil.android.sdk.split_select.SelectSplits(
            self.device, 'base.apk',
            ['split1.apk', 'split2.apk', 'split3.apk'],
            allow_cached_props=False),
         ['split2.apk']),
        (mock.call.os.path.exists('base.apk'), True),
        (mock.call.os.path.exists('split2.apk'), True),
        (self.call.device._GetApplicationPathsInternal('test.package'), []),
        (self.call.adb.InstallMultiple(
            ['base.apk', 'split2.apk'], partial=None, reinstall=False,
            allow_downgrade=False))):
      self.device.InstallSplitApk(DeviceUtilsInstallSplitApkTest.mock_apk,
          ['split1.apk', 'split2.apk', 'split3.apk'], permissions=[],
          retries=0)

  def testInstallSplitApk_partialInstall(self):
    # Only the stale split is pushed, as a partial reinstall.
    with self.assertCalls(
        (self.call.device._CheckSdkLevel(21)),
        (mock.call.devil.android.sdk.split_select.SelectSplits(
            self.device, 'base.apk',
            ['split1.apk', 'split2.apk', 'split3.apk'],
            allow_cached_props=False),
         ['split2.apk']),
        (mock.call.os.path.exists('base.apk'), True),
        (mock.call.os.path.exists('split2.apk'), True),
        (self.call.device._GetApplicationPathsInternal('test.package'),
         ['base-on-device.apk', 'split2-on-device.apk']),
        (self.call.device._ComputeStaleApks('test.package',
                                            ['base.apk', 'split2.apk']),
         (['split2.apk'], None)),
        (self.call.adb.InstallMultiple(
            ['split2.apk'], partial='test.package', reinstall=True,
            allow_downgrade=False))):
      self.device.InstallSplitApk(DeviceUtilsInstallSplitApkTest.mock_apk,
                                  ['split1.apk', 'split2.apk', 'split3.apk'],
                                  reinstall=True, permissions=[], retries=0)

  def testInstallSplitApk_downgrade(self):
    with self.assertCalls(
        (self.call.device._CheckSdkLevel(21)),
        (mock.call.devil.android.sdk.split_select.SelectSplits(
            self.device, 'base.apk',
            ['split1.apk', 'split2.apk', 'split3.apk'],
            allow_cached_props=False),
         ['split2.apk']),
        (mock.call.os.path.exists('base.apk'), True),
        (mock.call.os.path.exists('split2.apk'), True),
        (self.call.device._GetApplicationPathsInternal('test.package'),
         ['base-on-device.apk', 'split2-on-device.apk']),
        (self.call.device._ComputeStaleApks('test.package',
                                            ['base.apk', 'split2.apk']),
         (['split2.apk'], None)),
        (self.call.adb.InstallMultiple(
            ['split2.apk'], partial='test.package', reinstall=True,
            allow_downgrade=True))):
      self.device.InstallSplitApk(DeviceUtilsInstallSplitApkTest.mock_apk,
                                  ['split1.apk', 'split2.apk', 'split3.apk'],
                                  reinstall=True, permissions=[], retries=0,
                                  allow_downgrade=True)

  def testInstallSplitApk_missingSplit(self):
    # A selected split missing from disk aborts before any install.
    with self.assertCalls(
        (self.call.device._CheckSdkLevel(21)),
        (mock.call.devil.android.sdk.split_select.SelectSplits(
            self.device, 'base.apk',
            ['split1.apk', 'split2.apk', 'split3.apk'],
            allow_cached_props=False),
         ['split2.apk']),
        (mock.call.os.path.exists('base.apk'), True),
        (mock.call.os.path.exists('split2.apk'), False)):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device.InstallSplitApk(DeviceUtilsInstallSplitApkTest.mock_apk,
            ['split1.apk', 'split2.apk', 'split3.apk'], permissions=[],
            retries=0)
class DeviceUtilsUninstallTest(DeviceUtilsTest):
  """Tests Uninstall: delegates to adb when installed, no-ops otherwise."""

  def testUninstall_callsThrough(self):
    with self.assertCalls(
        (self.call.device._GetApplicationPathsInternal('test.package'),
         ['/path.apk']),
        self.call.adb.Uninstall('test.package', True)):
      self.device.Uninstall('test.package', True)

  def testUninstall_noop(self):
    # Nothing installed: no adb.Uninstall call expected.
    with self.assertCalls(
        (self.call.device._GetApplicationPathsInternal('test.package'), [])):
      self.device.Uninstall('test.package', True)
class DeviceUtilsSuTest(DeviceUtilsTest):
  """Tests _Su: `su -c` before M, `su 0` from M onwards."""

  def testSu_preM(self):
    with self.patch_call(
        self.call.device.build_version_sdk,
        return_value=version_codes.LOLLIPOP_MR1):
      self.assertEquals('su -c foo', self.device._Su('foo'))

  def testSu_mAndAbove(self):
    with self.patch_call(
        self.call.device.build_version_sdk,
        return_value=version_codes.MARSHMALLOW):
      self.assertEquals('su 0 foo', self.device._Su('foo'))
class DeviceUtilsRunShellCommandTest(DeviceUtilsTest):
def setUp(self):
super(DeviceUtilsRunShellCommandTest, self).setUp()
self.device.NeedsSU = mock.Mock(return_value=False)
def testRunShellCommand_commandAsList(self):
with self.assertCall(self.call.adb.Shell('pm list packages'), ''):
self.device.RunShellCommand(
['pm', 'list', 'packages'], check_return=True)
def testRunShellCommand_commandAsListQuoted(self):
with self.assertCall(self.call.adb.Shell("echo 'hello world' '$10'"), ''):
self.device.RunShellCommand(
['echo', 'hello world', '$10'], check_return=True)
def testRunShellCommand_commandAsString(self):
with self.assertCall(self.call.adb.Shell('echo "$VAR"'), ''):
self.device.RunShellCommand(
'echo "$VAR"', shell=True, check_return=True)
def testNewRunShellImpl_withEnv(self):
with self.assertCall(
self.call.adb.Shell('VAR=some_string echo "$VAR"'), ''):
self.device.RunShellCommand(
'echo "$VAR"', shell=True, check_return=True,
env={'VAR': 'some_string'})
def testNewRunShellImpl_withEnvQuoted(self):
with self.assertCall(
self.call.adb.Shell('PATH="$PATH:/other/path" run_this'), ''):
self.device.RunShellCommand(
['run_this'], check_return=True, env={'PATH': '$PATH:/other/path'})
def testNewRunShellImpl_withEnv_failure(self):
with self.assertRaises(KeyError):
self.device.RunShellCommand(
['some_cmd'], check_return=True, env={'INVALID NAME': 'value'})
def testNewRunShellImpl_withCwd(self):
with self.assertCall(self.call.adb.Shell('cd /some/test/path && ls'), ''):
self.device.RunShellCommand(
['ls'], check_return=True, cwd='/some/test/path')
def testNewRunShellImpl_withCwdQuoted(self):
with self.assertCall(
self.call.adb.Shell("cd '/some test/path with/spaces' && ls"), ''):
self.device.RunShellCommand(
['ls'], check_return=True, cwd='/some test/path with/spaces')
def testRunShellCommand_withHugeCmd(self):
payload = 'hi! ' * 1024
expected_cmd = "echo '%s'" % payload
with self.assertCalls(
(mock.call.devil.android.device_temp_file.DeviceTempFile(
self.adb, suffix='.sh'), MockTempFile('/sdcard/temp-123.sh')),
self.call.device._WriteFileWithPush('/sdcard/temp-123.sh', expected_cmd),
(self.call.adb.Shell('sh /sdcard/temp-123.sh'), payload + '\n')):
self.assertEquals(
[payload],
self.device.RunShellCommand(['echo', payload], check_return=True))
def testRunShellCommand_withHugeCmdAndSu(self):
payload = 'hi! ' * 1024
expected_cmd_without_su = """sh -c 'echo '"'"'%s'"'"''""" % payload
expected_cmd = 'su -c %s' % expected_cmd_without_su
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(self.call.device._Su(expected_cmd_without_su), expected_cmd),
(mock.call.devil.android.device_temp_file.DeviceTempFile(
self.adb, suffix='.sh'), MockTempFile('/sdcard/temp-123.sh')),
self.call.device._WriteFileWithPush('/sdcard/temp-123.sh', expected_cmd),
(self.call.adb.Shell('sh /sdcard/temp-123.sh'), payload + '\n')):
self.assertEquals(
[payload],
self.device.RunShellCommand(
['echo', payload], check_return=True, as_root=True))
def testRunShellCommand_withSu(self):
expected_cmd_without_su = "sh -c 'setprop service.adb.root 0'"
expected_cmd = 'su -c %s' % expected_cmd_without_su
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(self.call.device._Su(expected_cmd_without_su), expected_cmd),
(self.call.adb.Shell(expected_cmd), '')):
self.device.RunShellCommand(
['setprop', 'service.adb.root', '0'],
check_return=True, as_root=True)
def testRunShellCommand_withRunAs(self):
expected_cmd_without_run_as = "sh -c 'mkdir -p files'"
expected_cmd = (
'run-as org.devil.test_package %s' % expected_cmd_without_run_as)
with self.assertCall(self.call.adb.Shell(expected_cmd), ''):
self.device.RunShellCommand(
['mkdir', '-p', 'files'],
check_return=True, run_as='org.devil.test_package')
def testRunShellCommand_withRunAsAndSu(self):
expected_cmd_with_nothing = "sh -c 'mkdir -p files'"
expected_cmd_with_run_as = (
'run-as org.devil.test_package %s' % expected_cmd_with_nothing)
expected_cmd_without_su = (
'sh -c %s' % cmd_helper.SingleQuote(expected_cmd_with_run_as))
expected_cmd = 'su -c %s' % expected_cmd_without_su
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(self.call.device._Su(expected_cmd_without_su), expected_cmd),
(self.call.adb.Shell(expected_cmd), '')):
self.device.RunShellCommand(
['mkdir', '-p', 'files'],
check_return=True, run_as='org.devil.test_package',
as_root=True)
def testRunShellCommand_manyLines(self):
cmd = 'ls /some/path'
with self.assertCall(self.call.adb.Shell(cmd), 'file1\nfile2\nfile3\n'):
self.assertEquals(
['file1', 'file2', 'file3'],
self.device.RunShellCommand(cmd.split(), check_return=True))
def testRunShellCommand_manyLinesRawOutput(self):
cmd = 'ls /some/path'
with self.assertCall(self.call.adb.Shell(cmd), '\rfile1\nfile2\r\nfile3\n'):
self.assertEquals(
'\rfile1\nfile2\r\nfile3\n',
self.device.RunShellCommand(
cmd.split(), check_return=True, raw_output=True))
  def testRunShellCommand_singleLine_success(self):
    """single_line=True returns the sole output line, newline stripped."""
    cmd = 'echo $VALUE'
    with self.assertCall(self.call.adb.Shell(cmd), 'some value\n'):
      self.assertEquals(
          'some value',
          self.device.RunShellCommand(
              cmd, shell=True, check_return=True, single_line=True))
  def testRunShellCommand_singleLine_successEmptyLine(self):
    """A lone newline counts as a single (empty) line."""
    cmd = 'echo $VALUE'
    with self.assertCall(self.call.adb.Shell(cmd), '\n'):
      self.assertEquals(
          '',
          self.device.RunShellCommand(
              cmd, shell=True, check_return=True, single_line=True))
  def testRunShellCommand_singleLine_successWithoutEndLine(self):
    """Output without a trailing newline is still a valid single line."""
    cmd = 'echo -n $VALUE'
    with self.assertCall(self.call.adb.Shell(cmd), 'some value'):
      self.assertEquals(
          'some value',
          self.device.RunShellCommand(
              cmd, shell=True, check_return=True, single_line=True))
  def testRunShellCommand_singleLine_successNoOutput(self):
    """Completely empty output yields the empty string."""
    cmd = 'echo -n $VALUE'
    with self.assertCall(self.call.adb.Shell(cmd), ''):
      self.assertEquals(
          '',
          self.device.RunShellCommand(
              cmd, shell=True, check_return=True, single_line=True))
  def testRunShellCommand_singleLine_failTooManyLines(self):
    """More than one output line with single_line=True raises."""
    cmd = 'echo $VALUE'
    with self.assertCall(self.call.adb.Shell(cmd),
                         'some value\nanother value\n'):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device.RunShellCommand(
            cmd, shell=True, check_return=True, single_line=True)
  def testRunShellCommand_checkReturn_success(self):
    """check_return=True passes through when the shell exits cleanly."""
    cmd = 'echo $ANDROID_DATA'
    output = '/data\n'
    with self.assertCall(self.call.adb.Shell(cmd), output):
      self.assertEquals(
          [output.rstrip()],
          self.device.RunShellCommand(cmd, shell=True, check_return=True))
  def testRunShellCommand_checkReturn_failure(self):
    """check_return=True converts a shell error into AdbCommandFailedError."""
    cmd = 'ls /root'
    output = 'opendir failed, Permission denied\n'
    with self.assertCall(self.call.adb.Shell(cmd), self.ShellError(output)):
      with self.assertRaises(device_errors.AdbCommandFailedError):
        self.device.RunShellCommand(cmd.split(), check_return=True)
  def testRunShellCommand_checkReturn_disabled(self):
    """check_return=False swallows the shell error and returns its output."""
    cmd = 'ls /root'
    output = 'opendir failed, Permission denied\n'
    with self.assertCall(self.call.adb.Shell(cmd), self.ShellError(output)):
      self.assertEquals(
          [output.rstrip()],
          self.device.RunShellCommand(cmd.split(), check_return=False))
  def testRunShellCommand_largeOutput_enabled(self):
    """large_output=True redirects output to a device temp file and pulls it."""
    cmd = 'echo $VALUE'
    temp_file = MockTempFile('/sdcard/temp-123')
    cmd_redirect = '( %s )>%s' % (cmd, temp_file.name)
    with self.assertCalls(
        (mock.call.devil.android.device_temp_file.DeviceTempFile(self.adb),
         temp_file),
        (self.call.adb.Shell(cmd_redirect)),
        (self.call.device.ReadFile(temp_file.name, force_pull=True),
         'something')):
      self.assertEquals(
          ['something'],
          self.device.RunShellCommand(
              cmd, shell=True, large_output=True, check_return=True))
  def testRunShellCommand_largeOutput_disabledNoTrigger(self):
    """Without large_output, a plain shell error propagates unchanged."""
    cmd = 'something'
    with self.assertCall(self.call.adb.Shell(cmd), self.ShellError('')):
      with self.assertRaises(device_errors.AdbCommandFailedError):
        self.device.RunShellCommand([cmd], check_return=True)
  def testRunShellCommand_largeOutput_disabledTrigger(self):
    """A ShellError with status None triggers an automatic large-output retry."""
    cmd = 'echo $VALUE'
    temp_file = MockTempFile('/sdcard/temp-123')
    cmd_redirect = '( %s )>%s' % (cmd, temp_file.name)
    with self.assertCalls(
        (self.call.adb.Shell(cmd), self.ShellError('', None)),
        (mock.call.devil.android.device_temp_file.DeviceTempFile(self.adb),
         temp_file),
        (self.call.adb.Shell(cmd_redirect)),
        (self.call.device.ReadFile(mock.ANY, force_pull=True),
         'something')):
      self.assertEquals(
          ['something'],
          self.device.RunShellCommand(cmd, shell=True, check_return=True))
class DeviceUtilsRunPipedShellCommandTest(DeviceUtilsTest):
  """Tests for DeviceUtils._RunPipedShellCommand.

  The implementation appends an 'echo "PIPESTATUS: ..."' to the piped command
  so per-stage exit codes can be recovered from the output.
  """
  def testRunPipedShellCommand_success(self):
    """All pipe stages succeed; the PIPESTATUS marker line is stripped."""
    with self.assertCall(
        self.call.device.RunShellCommand(
            'ps | grep foo; echo "PIPESTATUS: ${PIPESTATUS[@]}"',
            shell=True, check_return=True),
        ['This line contains foo', 'PIPESTATUS: 0 0']):
      self.assertEquals(['This line contains foo'],
                        self.device._RunPipedShellCommand('ps | grep foo'))
  def testRunPipedShellCommand_firstCommandFails(self):
    """A non-zero status in the first stage raises with statuses attached."""
    with self.assertCall(
        self.call.device.RunShellCommand(
            'ps | grep foo; echo "PIPESTATUS: ${PIPESTATUS[@]}"',
            shell=True, check_return=True),
        ['PIPESTATUS: 1 0']):
      with self.assertRaises(device_errors.AdbShellCommandFailedError) as ec:
        self.device._RunPipedShellCommand('ps | grep foo')
      self.assertEquals([1, 0], ec.exception.status)
  def testRunPipedShellCommand_secondCommandFails(self):
    """A non-zero status in the second stage raises with statuses attached."""
    with self.assertCall(
        self.call.device.RunShellCommand(
            'ps | grep foo; echo "PIPESTATUS: ${PIPESTATUS[@]}"',
            shell=True, check_return=True),
        ['PIPESTATUS: 0 1']):
      with self.assertRaises(device_errors.AdbShellCommandFailedError) as ec:
        self.device._RunPipedShellCommand('ps | grep foo')
      self.assertEquals([0, 1], ec.exception.status)
  def testRunPipedShellCommand_outputCutOff(self):
    """Truncated output (no PIPESTATUS line) raises with status None."""
    with self.assertCall(
        self.call.device.RunShellCommand(
            'ps | grep foo; echo "PIPESTATUS: ${PIPESTATUS[@]}"',
            shell=True, check_return=True),
        ['foo.bar'] * 256 + ['foo.ba']):
      with self.assertRaises(device_errors.AdbShellCommandFailedError) as ec:
        self.device._RunPipedShellCommand('ps | grep foo')
      self.assertIs(None, ec.exception.status)
@mock.patch('time.sleep', mock.Mock())  # blocking KillAll polls with sleeps
class DeviceUtilsKillAllTest(DeviceUtilsTest):
  """Tests for DeviceUtils.KillAll (signal delivery and blocking modes)."""
  def testKillAll_noMatchingProcessesFailure(self):
    """No matching process raises unless quiet=True."""
    with self.assertCall(self.call.device.GetPids('test_process'), {}):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device.KillAll('test_process')
  def testKillAll_noMatchingProcessesQuiet(self):
    """quiet=True returns 0 instead of raising when nothing matches."""
    with self.assertCall(self.call.device.GetPids('test_process'), {}):
      self.assertEqual(0, self.device.KillAll('test_process', quiet=True))
  def testKillAll_nonblocking(self):
    """Substring match kills all matching pids and returns the count."""
    with self.assertCalls(
        (self.call.device.GetPids('some.process'),
         {'some.process': ['1234'], 'some.processing.thing': ['5678']}),
        (self.call.adb.Shell('kill -9 1234 5678'), '')):
      self.assertEquals(
          2, self.device.KillAll('some.process', blocking=False))
  def testKillAll_blocking(self):
    """blocking=True polls GetPids until the original pids are gone."""
    with self.assertCalls(
        (self.call.device.GetPids('some.process'),
         {'some.process': ['1234'], 'some.processing.thing': ['5678']}),
        (self.call.adb.Shell('kill -9 1234 5678'), ''),
        (self.call.device.GetPids('some.process'),
         {'some.processing.thing': ['5678']}),
        (self.call.device.GetPids('some.process'),
         {'some.process': ['1111']})):  # Other instance with different pid.
      self.assertEquals(
          2, self.device.KillAll('some.process', blocking=True))
  def testKillAll_exactNonblocking(self):
    """exact=True kills only processes whose name matches exactly."""
    with self.assertCalls(
        (self.call.device.GetPids('some.process'),
         {'some.process': ['1234'], 'some.processing.thing': ['5678']}),
        (self.call.adb.Shell('kill -9 1234'), '')):
      self.assertEquals(
          1, self.device.KillAll('some.process', exact=True, blocking=False))
  def testKillAll_exactBlocking(self):
    """exact=True with blocking waits only for the exact-match pid."""
    with self.assertCalls(
        (self.call.device.GetPids('some.process'),
         {'some.process': ['1234'], 'some.processing.thing': ['5678']}),
        (self.call.adb.Shell('kill -9 1234'), ''),
        (self.call.device.GetPids('some.process'),
         {'some.process': ['1234'], 'some.processing.thing': ['5678']}),
        (self.call.device.GetPids('some.process'),
         {'some.processing.thing': ['5678']})):
      self.assertEquals(
          1, self.device.KillAll('some.process', exact=True, blocking=True))
  def testKillAll_root(self):
    """as_root=True routes the kill through _Su when SU is needed."""
    with self.assertCalls(
        (self.call.device.GetPids('some.process'), {'some.process': ['1234']}),
        (self.call.device.NeedsSU(), True),
        (self.call.device._Su("sh -c 'kill -9 1234'"),
         "su -c sh -c 'kill -9 1234'"),
        (self.call.adb.Shell("su -c sh -c 'kill -9 1234'"), '')):
      self.assertEquals(
          1, self.device.KillAll('some.process', as_root=True))
  def testKillAll_sigterm(self):
    """signum selects the signal sent (SIGTERM -> kill -15)."""
    with self.assertCalls(
        (self.call.device.GetPids('some.process'),
         {'some.process': ['1234']}),
        (self.call.adb.Shell('kill -15 1234'), '')):
      self.assertEquals(
          1, self.device.KillAll('some.process', signum=device_signal.SIGTERM))
  def testKillAll_multipleInstances(self):
    """All pids of one process name are killed in a single shell call."""
    with self.assertCalls(
        (self.call.device.GetPids('some.process'),
         {'some.process': ['1234', '4567']}),
        (self.call.adb.Shell('kill -15 1234 4567'), '')):
      self.assertEquals(
          2, self.device.KillAll('some.process', signum=device_signal.SIGTERM))
class DeviceUtilsStartActivityTest(DeviceUtilsTest):
  """Tests for DeviceUtils.StartActivity.

  Each test checks the exact 'am start ...' command line built from an
  intent.Intent and that error output from am raises CommandFailedError.
  """
  def testStartActivity_actionOnly(self):
    test_intent = intent.Intent(action='android.intent.action.VIEW')
    with self.assertCall(
        self.call.adb.Shell('am start '
                            '-a android.intent.action.VIEW'),
        'Starting: Intent { act=android.intent.action.VIEW }'):
      self.device.StartActivity(test_intent)
  def testStartActivity_success(self):
    test_intent = intent.Intent(action='android.intent.action.VIEW',
                                package='test.package',
                                activity='.Main')
    with self.assertCall(
        self.call.adb.Shell('am start '
                            '-a android.intent.action.VIEW '
                            '-n test.package/.Main'),
        'Starting: Intent { act=android.intent.action.VIEW }'):
      self.device.StartActivity(test_intent)
  def testStartActivity_failure(self):
    """'Error:' in am output is surfaced as CommandFailedError."""
    test_intent = intent.Intent(action='android.intent.action.VIEW',
                                package='test.package',
                                activity='.Main')
    with self.assertCall(
        self.call.adb.Shell('am start '
                            '-a android.intent.action.VIEW '
                            '-n test.package/.Main'),
        'Error: Failed to start test activity'):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device.StartActivity(test_intent)
  def testStartActivity_blocking(self):
    """blocking=True adds the -W (wait) flag."""
    test_intent = intent.Intent(action='android.intent.action.VIEW',
                                package='test.package',
                                activity='.Main')
    with self.assertCall(
        self.call.adb.Shell('am start '
                            '-W '
                            '-a android.intent.action.VIEW '
                            '-n test.package/.Main'),
        'Starting: Intent { act=android.intent.action.VIEW }'):
      self.device.StartActivity(test_intent, blocking=True)
  def testStartActivity_withCategory(self):
    test_intent = intent.Intent(action='android.intent.action.VIEW',
                                package='test.package',
                                activity='.Main',
                                category='android.intent.category.HOME')
    with self.assertCall(
        self.call.adb.Shell('am start '
                            '-a android.intent.action.VIEW '
                            '-c android.intent.category.HOME '
                            '-n test.package/.Main'),
        'Starting: Intent { act=android.intent.action.VIEW }'):
      self.device.StartActivity(test_intent)
  def testStartActivity_withMultipleCategories(self):
    """Each category in the list gets its own -c flag."""
    test_intent = intent.Intent(action='android.intent.action.VIEW',
                                package='test.package',
                                activity='.Main',
                                category=['android.intent.category.HOME',
                                          'android.intent.category.BROWSABLE'])
    with self.assertCall(
        self.call.adb.Shell('am start '
                            '-a android.intent.action.VIEW '
                            '-c android.intent.category.HOME '
                            '-c android.intent.category.BROWSABLE '
                            '-n test.package/.Main'),
        'Starting: Intent { act=android.intent.action.VIEW }'):
      self.device.StartActivity(test_intent)
  def testStartActivity_withData(self):
    test_intent = intent.Intent(action='android.intent.action.VIEW',
                                package='test.package',
                                activity='.Main',
                                data='http://www.google.com/')
    with self.assertCall(
        self.call.adb.Shell('am start '
                            '-a android.intent.action.VIEW '
                            '-d http://www.google.com/ '
                            '-n test.package/.Main'),
        'Starting: Intent { act=android.intent.action.VIEW }'):
      self.device.StartActivity(test_intent)
  def testStartActivity_withStringExtra(self):
    """String extras map to --es <key> <value>."""
    test_intent = intent.Intent(action='android.intent.action.VIEW',
                                package='test.package',
                                activity='.Main',
                                extras={'foo': 'test'})
    with self.assertCall(
        self.call.adb.Shell('am start '
                            '-a android.intent.action.VIEW '
                            '-n test.package/.Main '
                            '--es foo test'),
        'Starting: Intent { act=android.intent.action.VIEW }'):
      self.device.StartActivity(test_intent)
  def testStartActivity_withBoolExtra(self):
    """Bool extras map to --ez <key> <value>."""
    test_intent = intent.Intent(action='android.intent.action.VIEW',
                                package='test.package',
                                activity='.Main',
                                extras={'foo': True})
    with self.assertCall(
        self.call.adb.Shell('am start '
                            '-a android.intent.action.VIEW '
                            '-n test.package/.Main '
                            '--ez foo True'),
        'Starting: Intent { act=android.intent.action.VIEW }'):
      self.device.StartActivity(test_intent)
  def testStartActivity_withIntExtra(self):
    """Int extras map to --ei <key> <value>."""
    test_intent = intent.Intent(action='android.intent.action.VIEW',
                                package='test.package',
                                activity='.Main',
                                extras={'foo': 123})
    with self.assertCall(
        self.call.adb.Shell('am start '
                            '-a android.intent.action.VIEW '
                            '-n test.package/.Main '
                            '--ei foo 123'),
        'Starting: Intent { act=android.intent.action.VIEW }'):
      self.device.StartActivity(test_intent)
  def testStartActivity_withTraceFile(self):
    """trace_file_name adds --start-profiler before the intent args."""
    test_intent = intent.Intent(action='android.intent.action.VIEW',
                                package='test.package',
                                activity='.Main')
    with self.assertCall(
        self.call.adb.Shell('am start '
                            '--start-profiler test_trace_file.out '
                            '-a android.intent.action.VIEW '
                            '-n test.package/.Main'),
        'Starting: Intent { act=android.intent.action.VIEW }'):
      self.device.StartActivity(test_intent,
                                trace_file_name='test_trace_file.out')
  def testStartActivity_withForceStop(self):
    """force_stop=True adds -S (stop app before starting)."""
    test_intent = intent.Intent(action='android.intent.action.VIEW',
                                package='test.package',
                                activity='.Main')
    with self.assertCall(
        self.call.adb.Shell('am start '
                            '-S '
                            '-a android.intent.action.VIEW '
                            '-n test.package/.Main'),
        'Starting: Intent { act=android.intent.action.VIEW }'):
      self.device.StartActivity(test_intent, force_stop=True)
  def testStartActivity_withFlags(self):
    """Intent flags are OR-ed together into a single -f hex value."""
    test_intent = intent.Intent(action='android.intent.action.VIEW',
                                package='test.package',
                                activity='.Main',
                                flags=[
                                  intent.FLAG_ACTIVITY_NEW_TASK,
                                  intent.FLAG_ACTIVITY_RESET_TASK_IF_NEEDED
                                ])
    with self.assertCall(
        self.call.adb.Shell('am start '
                            '-a android.intent.action.VIEW '
                            '-n test.package/.Main '
                            '-f 0x10200000'),
        'Starting: Intent { act=android.intent.action.VIEW }'):
      self.device.StartActivity(test_intent)
class DeviceUtilsStartInstrumentationTest(DeviceUtilsTest):
  """Tests for DeviceUtils.StartInstrumentation.

  The command sets $p to the package so extras can reference it safely.
  """
  def testStartInstrumentation_nothing(self):
    with self.assertCalls(
        self.call.device.RunShellCommand(
            'p=test.package;am instrument "$p"/.TestInstrumentation',
            shell=True, check_return=True, large_output=True)):
      self.device.StartInstrumentation(
          'test.package/.TestInstrumentation',
          finish=False, raw=False, extras=None)
  def testStartInstrumentation_finish(self):
    """finish=True adds -w (wait) and returns the command output."""
    with self.assertCalls(
        (self.call.device.RunShellCommand(
            'p=test.package;am instrument -w "$p"/.TestInstrumentation',
            shell=True, check_return=True, large_output=True),
         ['OK (1 test)'])):
      output = self.device.StartInstrumentation(
          'test.package/.TestInstrumentation',
          finish=True, raw=False, extras=None)
      self.assertEquals(['OK (1 test)'], output)
  def testStartInstrumentation_raw(self):
    """raw=True adds -r (raw output mode)."""
    with self.assertCalls(
        self.call.device.RunShellCommand(
            'p=test.package;am instrument -r "$p"/.TestInstrumentation',
            shell=True, check_return=True, large_output=True)):
      self.device.StartInstrumentation(
          'test.package/.TestInstrumentation',
          finish=False, raw=True, extras=None)
  def testStartInstrumentation_extras(self):
    """Package occurrences in extras are substituted with "$p"."""
    with self.assertCalls(
        self.call.device.RunShellCommand(
            'p=test.package;am instrument -e "$p".foo Foo -e bar \'Val \'"$p" '
            '"$p"/.TestInstrumentation',
            shell=True, check_return=True, large_output=True)):
      self.device.StartInstrumentation(
          'test.package/.TestInstrumentation',
          finish=False, raw=False, extras={'test.package.foo': 'Foo',
                                           'bar': 'Val test.package'})
class DeviceUtilsBroadcastIntentTest(DeviceUtilsTest):
  """Tests for DeviceUtils.BroadcastIntent ('am broadcast ...')."""
  def testBroadcastIntent_noExtras(self):
    test_intent = intent.Intent(action='test.package.with.an.INTENT')
    with self.assertCall(
        self.call.adb.Shell('am broadcast -a test.package.with.an.INTENT'),
        'Broadcasting: Intent { act=test.package.with.an.INTENT } '):
      self.device.BroadcastIntent(test_intent)
  def testBroadcastIntent_withExtra(self):
    """Extra values containing spaces are shell-quoted."""
    test_intent = intent.Intent(action='test.package.with.an.INTENT',
                                extras={'foo': 'bar value'})
    with self.assertCall(
        self.call.adb.Shell(
            "am broadcast -a test.package.with.an.INTENT --es foo 'bar value'"),
        'Broadcasting: Intent { act=test.package.with.an.INTENT } '):
      self.device.BroadcastIntent(test_intent)
  def testBroadcastIntent_withExtra_noValue(self):
    """A None-valued extra maps to --esn (null string extra)."""
    test_intent = intent.Intent(action='test.package.with.an.INTENT',
                                extras={'foo': None})
    with self.assertCall(
        self.call.adb.Shell(
            'am broadcast -a test.package.with.an.INTENT --esn foo'),
        'Broadcasting: Intent { act=test.package.with.an.INTENT } '):
      self.device.BroadcastIntent(test_intent)
class DeviceUtilsGoHomeTest(DeviceUtilsTest):
  """Tests for DeviceUtils.GoHome.

  GoHome inspects 'dumpsys window windows' for the launcher focus, starts
  the HOME intent when needed, and presses ENTER (66) / BACK (4) to dismiss
  popups until the launcher gains focus.
  """
  def testGoHome_popupsExist(self):
    """Launcher gains focus after one round of key events."""
    with self.assertCalls(
        (self.call.device.RunShellCommand(
            ['dumpsys', 'window', 'windows'], check_return=True,
            large_output=True), []),
        (self.call.device.RunShellCommand(
            ['am', 'start', '-W', '-a', 'android.intent.action.MAIN',
             '-c', 'android.intent.category.HOME'], check_return=True),
         # Fixed: dropped a stray adjacent empty string literal ('') that
         # was concatenated onto this value; the value is unchanged.
         'Starting: Intent { act=android.intent.action.MAIN }\r\n'),
        (self.call.device.RunShellCommand(
            ['dumpsys', 'window', 'windows'], check_return=True,
            large_output=True), []),
        (self.call.device.RunShellCommand(
            ['input', 'keyevent', '66'], check_return=True)),
        (self.call.device.RunShellCommand(
            ['input', 'keyevent', '4'], check_return=True)),
        (self.call.device.RunShellCommand(
            ['dumpsys', 'window', 'windows'], check_return=True,
            large_output=True),
         ['mCurrentFocus Launcher'])):
      self.device.GoHome()
  def testGoHome_willRetry(self):
    """Key events are retried; a timeout propagates as CommandTimeoutError."""
    with self.assertCalls(
        (self.call.device.RunShellCommand(
            ['dumpsys', 'window', 'windows'], check_return=True,
            large_output=True), []),
        (self.call.device.RunShellCommand(
            ['am', 'start', '-W', '-a', 'android.intent.action.MAIN',
             '-c', 'android.intent.category.HOME'], check_return=True),
         'Starting: Intent { act=android.intent.action.MAIN }\r\n'),
        (self.call.device.RunShellCommand(
            ['dumpsys', 'window', 'windows'], check_return=True,
            large_output=True), []),
        (self.call.device.RunShellCommand(
            ['input', 'keyevent', '66'], check_return=True,)),
        (self.call.device.RunShellCommand(
            ['input', 'keyevent', '4'], check_return=True)),
        (self.call.device.RunShellCommand(
            ['dumpsys', 'window', 'windows'], check_return=True,
            large_output=True), []),
        (self.call.device.RunShellCommand(
            ['input', 'keyevent', '66'], check_return=True)),
        (self.call.device.RunShellCommand(
            ['input', 'keyevent', '4'], check_return=True)),
        (self.call.device.RunShellCommand(
            ['dumpsys', 'window', 'windows'], check_return=True,
            large_output=True),
         self.TimeoutError())):
      with self.assertRaises(device_errors.CommandTimeoutError):
        self.device.GoHome()
  def testGoHome_alreadyFocused(self):
    """No action is taken when the launcher already has focus."""
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['dumpsys', 'window', 'windows'], check_return=True,
            large_output=True),
        ['mCurrentFocus Launcher']):
      self.device.GoHome()
  def testGoHome_alreadyFocusedAlternateCase(self):
    """Lower-case '.launcher' focus line is also recognized."""
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['dumpsys', 'window', 'windows'], check_return=True,
            large_output=True),
        [' mCurrentFocus .launcher/.']):
      self.device.GoHome()
  def testGoHome_obtainsFocusAfterGoingHome(self):
    """Starting HOME is enough when focus is gained without key events."""
    with self.assertCalls(
        (self.call.device.RunShellCommand(
            ['dumpsys', 'window', 'windows'], check_return=True,
            large_output=True), []),
        (self.call.device.RunShellCommand(
            ['am', 'start', '-W', '-a', 'android.intent.action.MAIN',
             '-c', 'android.intent.category.HOME'], check_return=True),
         'Starting: Intent { act=android.intent.action.MAIN }\r\n'),
        (self.call.device.RunShellCommand(
            ['dumpsys', 'window', 'windows'], check_return=True,
            large_output=True),
         ['mCurrentFocus Launcher'])):
      self.device.GoHome()
class DeviceUtilsForceStopTest(DeviceUtilsTest):
  """Tests for DeviceUtils.ForceStop ('am force-stop <package>')."""
  def testForceStop(self):
    """force-stop is issued when the package has running pids."""
    with self.assertCalls(
        (self.call.device.GetPids('test.package'), {'test.package': [1111]}),
        (self.call.device.RunShellCommand(
            ['am', 'force-stop', 'test.package'],
            check_return=True),
         ['Success'])):
      self.device.ForceStop('test.package')
  def testForceStop_NoProcessFound(self):
    """No running process: force-stop is skipped entirely."""
    with self.assertCall(
        self.call.device.GetPids('test.package'), {}):
      self.device.ForceStop('test.package')
class DeviceUtilsClearApplicationStateTest(DeviceUtilsTest):
  """Tests for DeviceUtils.ClearApplicationState.

  On SDK < 18 the install path is checked first (pm clear on a missing
  package could wedge older Androids); on SDK >= 18 pm clear is run
  unconditionally.
  """
  def testClearApplicationState_setPermissions(self):
    """Permissions are re-granted after clearing (clear revokes them)."""
    with self.assertCalls(
        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '17'),
        (self.call.device._GetApplicationPathsInternal('this.package.exists'),
         ['/data/app/this.package.exists.apk']),
        (self.call.device.RunShellCommand(
            ['pm', 'clear', 'this.package.exists'],
            check_return=True),
         ['Success']),
        (self.call.device.GrantPermissions(
            'this.package.exists', ['p1']), [])):
      self.device.ClearApplicationState(
          'this.package.exists', permissions=['p1'])
  def testClearApplicationState_packageDoesntExist(self):
    """SDK < 18: uninstalled package short-circuits before pm clear."""
    with self.assertCalls(
        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '11'),
        (self.call.device._GetApplicationPathsInternal('does.not.exist'),
         [])):
      self.device.ClearApplicationState('does.not.exist')
  def testClearApplicationState_packageDoesntExistOnAndroidJBMR2OrAbove(self):
    """SDK >= 18: pm clear is attempted; 'Failed' output is tolerated."""
    with self.assertCalls(
        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '18'),
        (self.call.device.RunShellCommand(
            ['pm', 'clear', 'this.package.does.not.exist'],
            check_return=True),
         ['Failed'])):
      self.device.ClearApplicationState('this.package.does.not.exist')
  def testClearApplicationState_packageExists(self):
    """SDK < 18: install path check precedes pm clear."""
    with self.assertCalls(
        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '17'),
        (self.call.device._GetApplicationPathsInternal('this.package.exists'),
         ['/data/app/this.package.exists.apk']),
        (self.call.device.RunShellCommand(
            ['pm', 'clear', 'this.package.exists'],
            check_return=True),
         ['Success'])):
      self.device.ClearApplicationState('this.package.exists')
  def testClearApplicationState_packageExistsOnAndroidJBMR2OrAbove(self):
    """SDK >= 18: pm clear is run without checking the install path."""
    with self.assertCalls(
        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '18'),
        (self.call.device.RunShellCommand(
            ['pm', 'clear', 'this.package.exists'],
            check_return=True),
         ['Success'])):
      self.device.ClearApplicationState('this.package.exists')
class DeviceUtilsSendKeyEventTest(DeviceUtilsTest):
  """Tests for DeviceUtils.SendKeyEvent ('input keyevent <code>')."""
  def testSendKeyEvent(self):
    # 66 is the Android keycode for ENTER.
    with self.assertCall(self.call.adb.Shell('input keyevent 66'), ''):
      self.device.SendKeyEvent(66)
class DeviceUtilsPushChangedFilesIndividuallyTest(DeviceUtilsTest):
  """Tests for DeviceUtils._PushChangedFilesIndividually."""

  def testPushChangedFilesIndividually_empty(self):
    """An empty file list results in no adb.Push calls."""
    with self.assertCalls():
      self.device._PushChangedFilesIndividually([])

  def testPushChangedFilesIndividually_single(self):
    """A single (host, device) pair triggers exactly one adb.Push."""
    host_device_pairs = [('/test/host/path', '/test/device/path')]
    with self.assertCalls(self.call.adb.Push(*host_device_pairs[0])):
      self.device._PushChangedFilesIndividually(host_device_pairs)

  def testPushChangedFilesIndividually_multiple(self):
    """Each (host, device) pair triggers its own adb.Push, in order."""
    host_device_pairs = [
        ('/test/host/path/file1', '/test/device/path/file1'),
        ('/test/host/path/file2', '/test/device/path/file2')]
    with self.assertCalls(
        self.call.adb.Push(*host_device_pairs[0]),
        self.call.adb.Push(*host_device_pairs[1])):
      self.device._PushChangedFilesIndividually(host_device_pairs)
class DeviceUtilsPushChangedFilesZippedTest(DeviceUtilsTest):
  """Tests for DeviceUtils._PushChangedFilesZipped.

  The zipped push builds a host-side zip in a subprocess, pushes it to a
  device temp file, then unzips on-device using the installed 'unzip'
  command binary; it returns False when that binary is unavailable.
  """
  def testPushChangedFilesZipped_noUnzipCommand(self):
    """Returns False when the on-device command binaries can't be installed."""
    test_files = [('/test/host/path/file1', '/test/device/path/file1')]
    mock_zip_temp = mock.mock_open()
    mock_zip_temp.return_value.name = '/test/temp/file/tmp.zip'
    with self.assertCalls(
        (mock.call.tempfile.NamedTemporaryFile(suffix='.zip'), mock_zip_temp),
        (mock.call.multiprocessing.Process(
            target=device_utils.DeviceUtils._CreateDeviceZip,
            args=('/test/temp/file/tmp.zip', test_files)), mock.Mock()),
        (self.call.device._MaybeInstallCommands(), False)):
      self.assertFalse(self.device._PushChangedFilesZipped(test_files,
                                                           ['/test/dir']))
  def _testPushChangedFilesZipped_spec(self, test_files):
    # Shared expectation sequence for the success path; parameterized on
    # the (host, device) file list.
    mock_zip_temp = mock.mock_open()
    mock_zip_temp.return_value.name = '/test/temp/file/tmp.zip'
    with self.assertCalls(
        (mock.call.tempfile.NamedTemporaryFile(suffix='.zip'), mock_zip_temp),
        (mock.call.multiprocessing.Process(
            target=device_utils.DeviceUtils._CreateDeviceZip,
            args=('/test/temp/file/tmp.zip', test_files)), mock.Mock()),
        (self.call.device._MaybeInstallCommands(), True),
        (self.call.device.NeedsSU(), True),
        (mock.call.devil.android.device_temp_file.DeviceTempFile(self.adb,
                                                                 suffix='.zip'),
         MockTempFile('/test/sdcard/foo123.zip')),
        self.call.adb.Push(
            '/test/temp/file/tmp.zip', '/test/sdcard/foo123.zip'),
        self.call.device.RunShellCommand(
            'unzip /test/sdcard/foo123.zip&&chmod -R 777 /test/dir',
            shell=True, as_root=True,
            env={'PATH': '/data/local/tmp/bin:$PATH'},
            check_return=True)):
      self.assertTrue(self.device._PushChangedFilesZipped(test_files,
                                                          ['/test/dir']))
  def testPushChangedFilesZipped_single(self):
    self._testPushChangedFilesZipped_spec(
        [('/test/host/path/file1', '/test/device/path/file1')])
  def testPushChangedFilesZipped_multiple(self):
    self._testPushChangedFilesZipped_spec(
        [('/test/host/path/file1', '/test/device/path/file1'),
         ('/test/host/path/file2', '/test/device/path/file2')])
class DeviceUtilsPathExistsTest(DeviceUtilsTest):
  """Tests for DeviceUtils.PathExists (and the FileExists alias).

  Existence is probed with 'test -e'; a shell error means 'not found'.
  """
  def testPathExists_pathExists(self):
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['test', '-e', '/path/file exists'],
            as_root=False, check_return=True, timeout=10, retries=0),
        []):
      self.assertTrue(self.device.PathExists('/path/file exists'))
  def testPathExists_multiplePathExists(self):
    """Multiple paths are combined with '-a' into one test invocation."""
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['test', '-e', '/path 1', '-a', '-e', '/path2'],
            as_root=False, check_return=True, timeout=10, retries=0),
        []):
      self.assertTrue(self.device.PathExists(('/path 1', '/path2')))
  def testPathExists_pathDoesntExist(self):
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['test', '-e', '/path/file.not.exists'],
            as_root=False, check_return=True, timeout=10, retries=0),
        self.ShellError()):
      self.assertFalse(self.device.PathExists('/path/file.not.exists'))
  def testPathExists_asRoot(self):
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['test', '-e', '/root/path/exists'],
            as_root=True, check_return=True, timeout=10, retries=0),
        self.ShellError()):
      self.assertFalse(
          self.device.PathExists('/root/path/exists', as_root=True))
  def testFileExists_pathDoesntExist(self):
    """FileExists delegates to the same 'test -e' probe as PathExists."""
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['test', '-e', '/path/file.not.exists'],
            as_root=False, check_return=True, timeout=10, retries=0),
        self.ShellError()):
      self.assertFalse(self.device.FileExists('/path/file.not.exists'))
class DeviceUtilsRemovePathTest(DeviceUtilsTest):
  """Tests for DeviceUtils.RemovePath ('rm' with optional -f/-r/su)."""
  def testRemovePath_regular(self):
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['rm', 'some file'], as_root=False, check_return=True),
        []):
      self.device.RemovePath('some file')
  def testRemovePath_withForce(self):
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['rm', '-f', 'some file'], as_root=False, check_return=True),
        []):
      self.device.RemovePath('some file', force=True)
  def testRemovePath_recursively(self):
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['rm', '-r', '/remove/this/dir'], as_root=False, check_return=True),
        []):
      self.device.RemovePath('/remove/this/dir', recursive=True)
  def testRemovePath_withRoot(self):
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['rm', 'some file'], as_root=True, check_return=True),
        []):
      self.device.RemovePath('some file', as_root=True)
  def testRemovePath_manyPaths(self):
    """A list argument removes all paths in a single rm invocation."""
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['rm', 'eeny', 'meeny', 'miny', 'moe'],
            as_root=False, check_return=True),
        []):
      self.device.RemovePath(['eeny', 'meeny', 'miny', 'moe'])
class DeviceUtilsPullFileTest(DeviceUtilsTest):
  """Tests for DeviceUtils.PullFile (adb pull with error translation)."""
  def testPullFile_existsOnDevice(self):
    # os.path.exists is patched because PullFile validates the host path.
    with mock.patch('os.path.exists', return_value=True):
      with self.assertCall(
          self.call.adb.Pull('/data/app/test.file.exists',
                             '/test/file/host/path')):
        self.device.PullFile('/data/app/test.file.exists',
                             '/test/file/host/path')
  def testPullFile_doesntExistOnDevice(self):
    """A 'remote object does not exist' pull error raises CommandFailedError."""
    with mock.patch('os.path.exists', return_value=True):
      with self.assertCall(
          self.call.adb.Pull('/data/app/test.file.does.not.exist',
                             '/test/file/host/path'),
          self.CommandError('remote object does not exist')):
        with self.assertRaises(device_errors.CommandFailedError):
          self.device.PullFile('/data/app/test.file.does.not.exist',
                               '/test/file/host/path')
class DeviceUtilsReadFileTest(DeviceUtilsTest):
  """Tests for DeviceUtils.ReadFile and its _ReadFileWithPull fallback.

  Small files are read via 'cat'; large (or zero-reported-size) files fall
  back to pulling into a host temp dir, which is cleaned up afterwards.
  """
  def testReadFileWithPull_success(self):
    tmp_host_dir = '/tmp/dir/on.host/'
    tmp_host = MockTempFile('/tmp/dir/on.host/tmp_ReadFileWithPull')
    tmp_host.file.read.return_value = 'some interesting contents'
    with self.assertCalls(
        (mock.call.tempfile.mkdtemp(), tmp_host_dir),
        (self.call.adb.Pull('/path/to/device/file', mock.ANY)),
        (mock.call.__builtin__.open(mock.ANY, 'r'), tmp_host),
        (mock.call.os.path.exists(tmp_host_dir), True),
        (mock.call.shutil.rmtree(tmp_host_dir), None)):
      self.assertEquals('some interesting contents',
                        self.device._ReadFileWithPull('/path/to/device/file'))
    tmp_host.file.read.assert_called_once_with()
  def testReadFileWithPull_rejected(self):
    """The host temp dir is removed even when the pull fails."""
    tmp_host_dir = '/tmp/dir/on.host/'
    with self.assertCalls(
        (mock.call.tempfile.mkdtemp(), tmp_host_dir),
        (self.call.adb.Pull('/path/to/device/file', mock.ANY),
         self.CommandError()),
        (mock.call.os.path.exists(tmp_host_dir), True),
        (mock.call.shutil.rmtree(tmp_host_dir), None)):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device._ReadFileWithPull('/path/to/device/file')
  def testReadFile_exists(self):
    """Small file (size below pull threshold) is read with 'cat'."""
    with self.assertCalls(
        (self.call.device.FileSize('/read/this/test/file', as_root=False), 256),
        (self.call.device.RunShellCommand(
            ['cat', '/read/this/test/file'],
            as_root=False, check_return=True),
         ['this is a test file'])):
      self.assertEqual('this is a test file\n',
                       self.device.ReadFile('/read/this/test/file'))
  def testReadFile_exists2(self):
    # Same as testReadFile_exists, but uses Android N ls output.
    with self.assertCalls(
        (self.call.device.FileSize('/read/this/test/file', as_root=False), 256),
        (self.call.device.RunShellCommand(
            ['cat', '/read/this/test/file'],
            as_root=False, check_return=True),
         ['this is a test file'])):
      self.assertEqual('this is a test file\n',
                       self.device.ReadFile('/read/this/test/file'))
  def testReadFile_doesNotExist(self):
    with self.assertCall(
        self.call.device.FileSize('/this/file/does.not.exist', as_root=False),
        self.CommandError('File does not exist')):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device.ReadFile('/this/file/does.not.exist')
  def testReadFile_zeroSize(self):
    """Reported size 0 (e.g. procfs) forces the pull path regardless."""
    with self.assertCalls(
        (self.call.device.FileSize('/this/file/has/zero/size', as_root=False),
         0),
        (self.call.device._ReadFileWithPull('/this/file/has/zero/size'),
         'but it has contents\n')):
      self.assertEqual('but it has contents\n',
                       self.device.ReadFile('/this/file/has/zero/size'))
  def testReadFile_withSU(self):
    with self.assertCalls(
        (self.call.device.FileSize(
            '/this/file/can.be.read.with.su', as_root=True), 256),
        (self.call.device.RunShellCommand(
            ['cat', '/this/file/can.be.read.with.su'],
            as_root=True, check_return=True),
         ['this is a test file', 'read with su'])):
      self.assertEqual(
          'this is a test file\nread with su\n',
          self.device.ReadFile('/this/file/can.be.read.with.su',
                               as_root=True))
  def testReadFile_withPull(self):
    """Large file (size above threshold) is read via _ReadFileWithPull."""
    contents = 'a' * 123456
    with self.assertCalls(
        (self.call.device.FileSize('/read/this/big/test/file', as_root=False),
         123456),
        (self.call.device._ReadFileWithPull('/read/this/big/test/file'),
         contents)):
      self.assertEqual(
          contents, self.device.ReadFile('/read/this/big/test/file'))
  def testReadFile_withPullAndSU(self):
    """Root-only large file is first copied to a world-readable temp file."""
    contents = 'b' * 123456
    with self.assertCalls(
        (self.call.device.FileSize(
            '/this/big/file/can.be.read.with.su', as_root=True), 123456),
        (self.call.device.NeedsSU(), True),
        (mock.call.devil.android.device_temp_file.DeviceTempFile(self.adb),
         MockTempFile('/sdcard/tmp/on.device')),
        self.call.device.RunShellCommand(
            'SRC=/this/big/file/can.be.read.with.su DEST=/sdcard/tmp/on.device;'
            'cp "$SRC" "$DEST" && chmod 666 "$DEST"',
            shell=True, as_root=True, check_return=True),
        (self.call.device._ReadFileWithPull('/sdcard/tmp/on.device'),
         contents)):
      self.assertEqual(
          contents,
          self.device.ReadFile('/this/big/file/can.be.read.with.su',
                               as_root=True))
  def testReadFile_forcePull(self):
    """force_pull=True skips the size check and pulls directly."""
    contents = 'a' * 123456
    with self.assertCall(
        self.call.device._ReadFileWithPull('/read/this/big/test/file'),
        contents):
      self.assertEqual(
          contents,
          self.device.ReadFile('/read/this/big/test/file', force_pull=True))
class DeviceUtilsWriteFileTest(DeviceUtilsTest):
  def testWriteFileWithPush_success(self):
    """Contents are written to a host temp file and pushed to the device."""
    tmp_host = MockTempFile('/tmp/file/on.host')
    contents = 'some interesting contents'
    with self.assertCalls(
        (mock.call.tempfile.NamedTemporaryFile(), tmp_host),
        self.call.adb.Push('/tmp/file/on.host', '/path/to/device/file')):
      self.device._WriteFileWithPush('/path/to/device/file', contents)
    tmp_host.file.write.assert_called_once_with(contents)
  def testWriteFileWithPush_rejected(self):
    """A failed push surfaces as CommandFailedError."""
    tmp_host = MockTempFile('/tmp/file/on.host')
    contents = 'some interesting contents'
    with self.assertCalls(
        (mock.call.tempfile.NamedTemporaryFile(), tmp_host),
        (self.call.adb.Push('/tmp/file/on.host', '/path/to/device/file'),
         self.CommandError())):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device._WriteFileWithPush('/path/to/device/file', contents)
  def testWriteFile_withPush(self):
    """Large contents (above the echo threshold) go through push."""
    contents = 'some large contents ' * 26  # 20 * 26 = 520 chars
    with self.assertCalls(
        self.call.device._WriteFileWithPush('/path/to/device/file', contents)):
      self.device.WriteFile('/path/to/device/file', contents)
  def testWriteFile_withPushForced(self):
    """force_push=True uses push even for tiny contents."""
    contents = 'tiny contents'
    with self.assertCalls(
        self.call.device._WriteFileWithPush('/path/to/device/file', contents)):
      self.device.WriteFile('/path/to/device/file', contents, force_push=True)
  def testWriteFile_withPushAndSU(self):
    """as_root: push to a temp file, then cp into place with su."""
    contents = 'some large contents ' * 26  # 20 * 26 = 520 chars
    with self.assertCalls(
        (self.call.device.NeedsSU(), True),
        (mock.call.devil.android.device_temp_file.DeviceTempFile(self.adb),
         MockTempFile('/sdcard/tmp/on.device')),
        self.call.device._WriteFileWithPush('/sdcard/tmp/on.device', contents),
        self.call.device.RunShellCommand(
            ['cp', '/sdcard/tmp/on.device', '/path/to/device/file'],
            as_root=True, check_return=True)):
      self.device.WriteFile('/path/to/device/file', contents, as_root=True)
  def testWriteFile_withEcho(self):
    """Small contents are written with an 'echo -n ... >' shell command."""
    with self.assertCall(self.call.adb.Shell(
        "echo -n the.contents > /test/file/to.write"), ''):
      self.device.WriteFile('/test/file/to.write', 'the.contents')
  def testWriteFile_withEchoAndQuotes(self):
    """Paths and contents containing spaces are shell-quoted."""
    with self.assertCall(self.call.adb.Shell(
        "echo -n 'the contents' > '/test/file/to write'"), ''):
      self.device.WriteFile('/test/file/to write', 'the contents')
def testWriteFile_withEchoAndSU(self):
expected_cmd_without_su = "sh -c 'echo -n contents > /test/file'"
expected_cmd = 'su -c %s' % expected_cmd_without_su
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(self.call.device._Su(expected_cmd_without_su), expected_cmd),
(self.call.adb.Shell(expected_cmd),
'')):
self.device.WriteFile('/test/file', 'contents', as_root=True)
class DeviceUtilsStatDirectoryTest(DeviceUtilsTest):
  """Tests for DeviceUtils.StatDirectory's parsing of `ls -a -l` output.

  The fixture below covers directories, files with spaces in the name,
  symlinks, sockets, char/block devices, sticky/setuid/setgid bits, and
  old-Android rows that omit st_nlink.
  """
  # Note: Also tests ListDirectory in testStatDirectory_fileList.
  EXAMPLE_LS_OUTPUT = [
      'total 12345',
      'drwxr-xr-x 19 root root 0 1970-04-06 18:03 .',
      'drwxr-xr-x 19 root root 0 1970-04-06 18:03 ..',
      'drwxr-xr-x 6 root root 1970-01-01 00:00 some_dir',
      '-rw-r--r-- 1 root root 723 1971-01-01 07:04 some_file',
      '-rw-r----- 1 root root 327 2009-02-13 23:30 My Music File',
      # Older Android versions do not print st_nlink
      'lrwxrwxrwx root root 1970-01-01 00:00 lnk -> /some/path',
      'srwxrwx--- system system 2016-05-31 17:25 a_socket1',
      'drwxrwxrwt system misc 1970-11-23 02:25 tmp',
      'drwxr-s--- system shell 1970-11-23 02:24 my_cmd',
      'cr--r----- root system 10, 183 1971-01-01 07:04 random',
      'brw------- root root 7, 0 1971-01-01 07:04 block_dev',
      '-rwS------ root shell 157404 2015-04-13 15:44 silly',
  ]
  # Entries expected after parsing; '.' and '..' must be excluded.
  FILENAMES = [
      'some_dir', 'some_file', 'My Music File', 'lnk', 'a_socket1',
      'tmp', 'my_cmd', 'random', 'block_dev', 'silly']
  def getStatEntries(self, path_given='/', path_listed='/'):
    """Run StatDirectory on path_given while expecting an ls of path_listed.

    Returns a dict mapping filename -> stat entry dict.
    """
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['ls', '-a', '-l', path_listed],
            check_return=True, as_root=False, env={'TZ': 'utc'}),
        self.EXAMPLE_LS_OUTPUT):
      entries = self.device.StatDirectory(path_given)
    return {f['filename']: f for f in entries}
  def getListEntries(self):
    """Run ListDirectory('/') against the fixture and return the names."""
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['ls', '-a', '-l', '/'],
            check_return=True, as_root=False, env={'TZ': 'utc'}),
        self.EXAMPLE_LS_OUTPUT):
      return self.device.ListDirectory('/')
  def testStatDirectory_forceTrailingSlash(self):
    """Listed path always ends with '/', whether the caller gave one or not."""
    self.getStatEntries(path_given='/foo/bar/', path_listed='/foo/bar/')
    self.getStatEntries(path_given='/foo/bar', path_listed='/foo/bar/')
  def testStatDirectory_fileList(self):
    self.assertItemsEqual(self.getStatEntries().keys(), self.FILENAMES)
    self.assertItemsEqual(self.getListEntries(), self.FILENAMES)
  def testStatDirectory_fileModes(self):
    # Each fixture row's type character maps to the right stat.S_IS* check.
    expected_modes = (
        ('some_dir', stat.S_ISDIR),
        ('some_file', stat.S_ISREG),
        ('lnk', stat.S_ISLNK),
        ('a_socket1', stat.S_ISSOCK),
        ('block_dev', stat.S_ISBLK),
        ('random', stat.S_ISCHR),
    )
    entries = self.getStatEntries()
    for filename, check in expected_modes:
      self.assertTrue(check(entries[filename]['st_mode']))
  def testStatDirectory_filePermissions(self):
    should_have = (
        ('some_file', stat.S_IWUSR),  # Owner can write.
        ('tmp', stat.S_IXOTH),  # Others can execute.
        ('tmp', stat.S_ISVTX),  # Has sticky bit.
        ('my_cmd', stat.S_ISGID),  # Has set-group-ID bit.
        ('silly', stat.S_ISUID),  # Has set UID bit.
    )
    should_not_have = (
        ('some_file', stat.S_IWOTH),  # Others can't write.
        ('block_dev', stat.S_IRGRP),  # Group can't read.
        ('silly', stat.S_IXUSR),  # Owner can't execute.
    )
    entries = self.getStatEntries()
    for filename, bit in should_have:
      self.assertTrue(entries[filename]['st_mode'] & bit)
    for filename, bit in should_not_have:
      self.assertFalse(entries[filename]['st_mode'] & bit)
  def testStatDirectory_numHardLinks(self):
    """st_nlink is parsed when present and omitted when ls didn't print it."""
    entries = self.getStatEntries()
    self.assertEqual(entries['some_dir']['st_nlink'], 6)
    self.assertEqual(entries['some_file']['st_nlink'], 1)
    self.assertFalse('st_nlink' in entries['tmp'])
  def testStatDirectory_fileOwners(self):
    entries = self.getStatEntries()
    self.assertEqual(entries['some_dir']['st_owner'], 'root')
    self.assertEqual(entries['my_cmd']['st_owner'], 'system')
    self.assertEqual(entries['my_cmd']['st_group'], 'shell')
    self.assertEqual(entries['tmp']['st_group'], 'misc')
  def testStatDirectory_fileSize(self):
    entries = self.getStatEntries()
    self.assertEqual(entries['some_file']['st_size'], 723)
    self.assertEqual(entries['My Music File']['st_size'], 327)
    # Sizes are sometimes not reported for non-regular files, don't try to
    # guess the size in those cases.
    self.assertFalse('st_size' in entries['some_dir'])
  def testStatDirectory_fileDateTime(self):
    """Timestamps are parsed as UTC epoch seconds (TZ=utc in the ls env)."""
    entries = self.getStatEntries()
    self.assertEqual(entries['some_dir']['st_mtime'], 0)  # Epoch!
    self.assertEqual(entries['My Music File']['st_mtime'], 1234567800)
  def testStatDirectory_deviceType(self):
    """Char/block device rows yield a (major, minor) st_rdev_pair."""
    entries = self.getStatEntries()
    self.assertEqual(entries['random']['st_rdev_pair'], (10, 183))
    self.assertEqual(entries['block_dev']['st_rdev_pair'], (7, 0))
  def testStatDirectory_symbolicLinks(self):
    """Exactly the S_ISLNK entries carry a 'symbolic_link_to' target."""
    entries = self.getStatEntries()
    self.assertEqual(entries['lnk']['symbolic_link_to'], '/some/path')
    for d in entries.itervalues():
      self.assertEqual('symbolic_link_to' in d, stat.S_ISLNK(d['st_mode']))
class DeviceUtilsStatPathTest(DeviceUtilsTest):
  """Tests for DeviceUtils.StatPath (single-entry lookup via StatDirectory)."""
  # A fake StatDirectory result used as the mocked return value.
  EXAMPLE_DIRECTORY = [
      {'filename': 'foo.txt', 'st_size': 123, 'st_time': 456},
      {'filename': 'some_dir', 'st_time': 0}
  ]
  # filename -> entry index for convenient expected-value lookup.
  INDEX = {e['filename']: e for e in EXAMPLE_DIRECTORY}
  def testStatPath_file(self):
    with self.assertCall(
        self.call.device.StatDirectory('/data/local/tmp', as_root=False),
        self.EXAMPLE_DIRECTORY):
      self.assertEquals(self.INDEX['foo.txt'],
                        self.device.StatPath('/data/local/tmp/foo.txt'))
  def testStatPath_directory(self):
    with self.assertCall(
        self.call.device.StatDirectory('/data/local/tmp', as_root=False),
        self.EXAMPLE_DIRECTORY):
      self.assertEquals(self.INDEX['some_dir'],
                        self.device.StatPath('/data/local/tmp/some_dir'))
  def testStatPath_directoryWithTrailingSlash(self):
    """A trailing slash on the target path is tolerated."""
    with self.assertCall(
        self.call.device.StatDirectory('/data/local/tmp', as_root=False),
        self.EXAMPLE_DIRECTORY):
      self.assertEquals(self.INDEX['some_dir'],
                        self.device.StatPath('/data/local/tmp/some_dir/'))
  def testStatPath_doesNotExist(self):
    """Missing entries raise CommandFailedError rather than returning None."""
    with self.assertCall(
        self.call.device.StatDirectory('/data/local/tmp', as_root=False),
        self.EXAMPLE_DIRECTORY):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device.StatPath('/data/local/tmp/does.not.exist.txt')
class DeviceUtilsFileSizeTest(DeviceUtilsTest):
  """Tests for DeviceUtils.FileSize (st_size lookup via StatDirectory)."""
  # Mocked StatDirectory result; note 'some_dir' deliberately has no st_size.
  EXAMPLE_DIRECTORY = [
      {'filename': 'foo.txt', 'st_size': 123, 'st_mtime': 456},
      {'filename': 'some_dir', 'st_mtime': 0}
  ]
  def testFileSize_file(self):
    with self.assertCall(
        self.call.device.StatDirectory('/data/local/tmp', as_root=False),
        self.EXAMPLE_DIRECTORY):
      self.assertEquals(123,
                        self.device.FileSize('/data/local/tmp/foo.txt'))
  def testFileSize_doesNotExist(self):
    with self.assertCall(
        self.call.device.StatDirectory('/data/local/tmp', as_root=False),
        self.EXAMPLE_DIRECTORY):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device.FileSize('/data/local/tmp/does.not.exist.txt')
  def testFileSize_directoryWithNoSize(self):
    """Entries without st_size (e.g. directories) raise rather than guess."""
    with self.assertCall(
        self.call.device.StatDirectory('/data/local/tmp', as_root=False),
        self.EXAMPLE_DIRECTORY):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device.FileSize('/data/local/tmp/some_dir')
class DeviceUtilsSetJavaAssertsTest(DeviceUtilsTest):
  """Tests for DeviceUtils.SetJavaAsserts.

  SetJavaAsserts edits dalvik.vm.enableassertions in the local.prop file
  and mirrors the value with Get/SetProp; it returns True only when a
  change was actually made.
  """
  def testSetJavaAsserts_enable(self):
    with self.assertCalls(
        (self.call.device.ReadFile(self.device.LOCAL_PROPERTIES_PATH),
         'some.example.prop=with an example value\n'
         'some.other.prop=value_ok\n'),
        # The assertion property is appended to the existing file content.
        self.call.device.WriteFile(
            self.device.LOCAL_PROPERTIES_PATH,
            'some.example.prop=with an example value\n'
            'some.other.prop=value_ok\n'
            'dalvik.vm.enableassertions=all\n'),
        (self.call.device.GetProp('dalvik.vm.enableassertions'), ''),
        self.call.device.SetProp('dalvik.vm.enableassertions', 'all')):
      self.assertTrue(self.device.SetJavaAsserts(True))
  def testSetJavaAsserts_disable(self):
    with self.assertCalls(
        (self.call.device.ReadFile(self.device.LOCAL_PROPERTIES_PATH),
         'some.example.prop=with an example value\n'
         'dalvik.vm.enableassertions=all\n'
         'some.other.prop=value_ok\n'),
        # The assertion line is removed; other lines are preserved in order.
        self.call.device.WriteFile(
            self.device.LOCAL_PROPERTIES_PATH,
            'some.example.prop=with an example value\n'
            'some.other.prop=value_ok\n'),
        (self.call.device.GetProp('dalvik.vm.enableassertions'), 'all'),
        self.call.device.SetProp('dalvik.vm.enableassertions', '')):
      self.assertTrue(self.device.SetJavaAsserts(False))
  def testSetJavaAsserts_alreadyEnabled(self):
    """No WriteFile/SetProp happens when the value is already correct."""
    with self.assertCalls(
        (self.call.device.ReadFile(self.device.LOCAL_PROPERTIES_PATH),
         'some.example.prop=with an example value\n'
         'dalvik.vm.enableassertions=all\n'
         'some.other.prop=value_ok\n'),
        (self.call.device.GetProp('dalvik.vm.enableassertions'), 'all')):
      self.assertFalse(self.device.SetJavaAsserts(True))
  def testSetJavaAsserts_malformedLocalProp(self):
    """Lines without '=' in local.prop are skipped without raising."""
    with self.assertCalls(
        (self.call.device.ReadFile(self.device.LOCAL_PROPERTIES_PATH),
         'some.example.prop=with an example value\n'
         'malformed_property\n'
         'dalvik.vm.enableassertions=all\n'
         'some.other.prop=value_ok\n'),
        (self.call.device.GetProp('dalvik.vm.enableassertions'), 'all')):
      self.assertFalse(self.device.SetJavaAsserts(True))
class DeviceUtilsEnsureCacheInitializedTest(DeviceUtilsTest):
  """Tests for DeviceUtils._EnsureCacheInitialized ('token' population)."""
  def testEnsureCacheInitialized_noCache_success(self):
    """A successful getprop-based shell run populates the cache token."""
    self.assertIsNone(self.device._cache['token'])
    with self.assertCall(
        self.call.device.RunShellCommand(
            AnyStringWith('getprop'),
            shell=True, check_return=True, large_output=True),
        ['/sdcard', 'TOKEN']):
      self.device._EnsureCacheInitialized()
    self.assertIsNotNone(self.device._cache['token'])
  def testEnsureCacheInitialized_noCache_failure(self):
    """A timeout propagates and leaves the cache token unset."""
    self.assertIsNone(self.device._cache['token'])
    with self.assertCall(
        self.call.device.RunShellCommand(
            AnyStringWith('getprop'),
            shell=True, check_return=True, large_output=True),
        self.TimeoutError()):
      with self.assertRaises(device_errors.CommandTimeoutError):
        self.device._EnsureCacheInitialized()
    self.assertIsNone(self.device._cache['token'])
  def testEnsureCacheInitialized_cache(self):
    """A pre-populated token short-circuits: no adb calls at all."""
    self.device._cache['token'] = 'TOKEN'
    with self.assertCalls():
      self.device._EnsureCacheInitialized()
    self.assertIsNotNone(self.device._cache['token'])
class DeviceUtilsGetPropTest(DeviceUtilsTest):
  """Tests for DeviceUtils.GetProp, including ro.* property caching."""
  def testGetProp_exists(self):
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['getprop', 'test.property'], check_return=True, single_line=True,
            timeout=self.device._default_timeout,
            retries=self.device._default_retries),
        'property_value'):
      self.assertEqual('property_value',
                       self.device.GetProp('test.property'))
  def testGetProp_doesNotExist(self):
    """Unknown properties come back as the empty string, not an error."""
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['getprop', 'property.does.not.exist'],
            check_return=True, single_line=True,
            timeout=self.device._default_timeout,
            retries=self.device._default_retries),
        ''):
      self.assertEqual('', self.device.GetProp('property.does.not.exist'))
  def testGetProp_cachedRoProp(self):
    """With cache=True, a second ro.* lookup is served from the cache
    (only one cache-initialization call sequence is expected)."""
    with self.assertCalls(
        self.EnsureCacheInitialized(props=['[ro.build.type]: [userdebug]'])):
      self.assertEqual('userdebug',
                       self.device.GetProp('ro.build.type', cache=True))
      self.assertEqual('userdebug',
                       self.device.GetProp('ro.build.type', cache=True))
class DeviceUtilsSetPropTest(DeviceUtilsTest):
  """Tests for DeviceUtils.SetProp, with and without check=True."""
  def testSetProp(self):
    with self.assertCall(
        self.call.device.RunShellCommand(
            ['setprop', 'test.property', 'test value'], check_return=True)):
      self.device.SetProp('test.property', 'test value')
  def testSetProp_check_succeeds(self):
    """check=True re-reads the property (uncached) to verify the write."""
    with self.assertCalls(
        (self.call.device.RunShellCommand(
            ['setprop', 'test.property', 'new_value'], check_return=True)),
        (self.call.device.GetProp('test.property', cache=False), 'new_value')):
      self.device.SetProp('test.property', 'new_value', check=True)
  def testSetProp_check_fails(self):
    """check=True raises when the read-back value doesn't match."""
    with self.assertCalls(
        (self.call.device.RunShellCommand(
            ['setprop', 'test.property', 'new_value'], check_return=True)),
        (self.call.device.GetProp('test.property', cache=False), 'old_value')):
      with self.assertRaises(device_errors.CommandFailedError):
        self.device.SetProp('test.property', 'new_value', check=True)
class DeviceUtilsGetPidsTest(DeviceUtilsTest):
  """Tests for DeviceUtils.GetPids / GetApplicationPids.

  GetPids greps `ps` output (via _RunPipedShellCommand) and returns a
  dict of process name -> list of PID strings; on SDKs newer than
  NOUGAT_MR1 it uses `ps -e`. GetApplicationPids additionally requires
  an exact process-name match.
  """
  def setUp(self):
    super(DeviceUtilsGetPidsTest, self).setUp()
    # Canned `ps` output: header row plus one row per fake process.
    self.sample_output = [
        'USER PID PPID VSIZE RSS WCHAN PC NAME',
        'user 1001 100 1024 1024 ffffffff 00000000 one.match',
        'user 1002 100 1024 1024 ffffffff 00000000 two.match',
        'user 1003 100 1024 1024 ffffffff 00000000 three.match',
        'user 1234 100 1024 1024 ffffffff 00000000 my$process',
        'user 1000 100 1024 1024 ffffffff 00000000 foo',
        'user 1236 100 1024 1024 ffffffff 00000000 foo',
    ]
  def _grepOutput(self, substring):
    # Simulates what `ps | grep -F substring` would return on the device.
    return [line for line in self.sample_output if substring in line]
  def testGetPids_sdkGreaterThanNougatMR1(self):
    """Post-NOUGAT_MR1 devices list processes with `ps -e`."""
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=(version_codes.NOUGAT_MR1 + 1)):
      with self.patch_call(self.call.device.build_id,
                           return_value='ZZZ99Z'):
        with self.assertCall(
            self.call.device._RunPipedShellCommand(
                'ps -e | grep -F example.process'), []):
          self.device.GetPids('example.process')
  def testGetPids_noMatches(self):
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.LOLLIPOP):
      with self.assertCall(
          self.call.device._RunPipedShellCommand('ps | grep -F does.not.match'),
          self._grepOutput('does.not.match')):
        self.assertEqual({}, self.device.GetPids('does.not.match'))
  def testGetPids_oneMatch(self):
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.LOLLIPOP):
      with self.assertCall(
          self.call.device._RunPipedShellCommand('ps | grep -F one.match'),
          self._grepOutput('one.match')):
        self.assertEqual(
            {'one.match': ['1001']},
            self.device.GetPids('one.match'))
  def testGetPids_multipleMatches(self):
    """A substring filter can match several distinct process names."""
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.LOLLIPOP):
      with self.assertCall(
          self.call.device._RunPipedShellCommand('ps | grep -F match'),
          self._grepOutput('match')):
        self.assertEqual(
            {'one.match': ['1001'],
             'two.match': ['1002'],
             'three.match': ['1003']},
            self.device.GetPids('match'))
  def testGetPids_quotable(self):
    """Process names with shell metacharacters are quoted for grep."""
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.LOLLIPOP):
      with self.assertCall(
          self.call.device._RunPipedShellCommand("ps | grep -F 'my$process'"),
          self._grepOutput('my$process')):
        self.assertEqual(
            {'my$process': ['1234']}, self.device.GetPids('my$process'))
  def testGetPids_multipleInstances(self):
    """Multiple PIDs for the same name are collected into one list."""
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.LOLLIPOP):
      with self.assertCall(
          self.call.device._RunPipedShellCommand('ps | grep -F foo'),
          self._grepOutput('foo')):
        self.assertEqual(
            {'foo': ['1000', '1236']},
            self.device.GetPids('foo'))
  def testGetPids_allProcesses(self):
    """GetPids() with no filter runs plain `ps` and returns everything."""
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.LOLLIPOP):
      with self.assertCall(
          self.call.device.RunShellCommand(
              ['ps'], check_return=True, large_output=True),
          self.sample_output):
        self.assertEqual(
            {'one.match': ['1001'],
             'two.match': ['1002'],
             'three.match': ['1003'],
             'my$process': ['1234'],
             'foo': ['1000', '1236']},
            self.device.GetPids())
  def testGetApplicationPids_notFound(self):
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.LOLLIPOP):
      with self.assertCall(
          self.call.device._RunPipedShellCommand('ps | grep -F match'),
          self._grepOutput('match')):
        # No PIDs found, process name should be exact match.
        self.assertEqual([], self.device.GetApplicationPids('match'))
  def testGetApplicationPids_foundOne(self):
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.LOLLIPOP):
      with self.assertCall(
          self.call.device._RunPipedShellCommand('ps | grep -F one.match'),
          self._grepOutput('one.match')):
        self.assertEqual(['1001'], self.device.GetApplicationPids('one.match'))
  def testGetApplicationPids_foundMany(self):
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.LOLLIPOP):
      with self.assertCall(
          self.call.device._RunPipedShellCommand('ps | grep -F foo'),
          self._grepOutput('foo')):
        self.assertEqual(
            ['1000', '1236'],
            self.device.GetApplicationPids('foo'))
  def testGetApplicationPids_atMostOneNotFound(self):
    """at_most_one=True returns None (not []) when nothing matches."""
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.LOLLIPOP):
      with self.assertCall(
          self.call.device._RunPipedShellCommand('ps | grep -F match'),
          self._grepOutput('match')):
        # No PIDs found, process name should be exact match.
        self.assertEqual(
            None,
            self.device.GetApplicationPids('match', at_most_one=True))
  def testGetApplicationPids_atMostOneFound(self):
    """at_most_one=True returns the bare PID string on a single match."""
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.LOLLIPOP):
      with self.assertCall(
          self.call.device._RunPipedShellCommand('ps | grep -F one.match'),
          self._grepOutput('one.match')):
        self.assertEqual(
            '1001',
            self.device.GetApplicationPids('one.match', at_most_one=True))
  def testGetApplicationPids_atMostOneFoundTooMany(self):
    """at_most_one=True raises when more than one PID matches."""
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.LOLLIPOP):
      with self.assertRaises(device_errors.CommandFailedError):
        with self.assertCall(
            self.call.device._RunPipedShellCommand('ps | grep -F foo'),
            self._grepOutput('foo')):
          self.device.GetApplicationPids('foo', at_most_one=True)
class DeviceUtilsGetSetEnforce(DeviceUtilsTest):
  """Tests for DeviceUtils.GetEnforce / SetEnforce (SELinux mode).

  GetEnforce maps 'Enforcing' -> True, 'Permissive' -> False, and
  'Disabled' -> None. SetEnforce accepts bool, int, and str forms of
  the enabled flag and always checks NeedsSU first.
  """
  def testGetEnforce_Enforcing(self):
    with self.assertCall(self.call.adb.Shell('getenforce'), 'Enforcing'):
      self.assertEqual(True, self.device.GetEnforce())
  def testGetEnforce_Permissive(self):
    with self.assertCall(self.call.adb.Shell('getenforce'), 'Permissive'):
      self.assertEqual(False, self.device.GetEnforce())
  def testGetEnforce_Disabled(self):
    with self.assertCall(self.call.adb.Shell('getenforce'), 'Disabled'):
      self.assertEqual(None, self.device.GetEnforce())
  def testSetEnforce_Enforcing(self):
    with self.assertCalls(
        (self.call.device.NeedsSU(), False),
        (self.call.adb.Shell('setenforce 1'), '')):
      self.device.SetEnforce(enabled=True)
  def testSetEnforce_Permissive(self):
    with self.assertCalls(
        (self.call.device.NeedsSU(), False),
        (self.call.adb.Shell('setenforce 0'), '')):
      self.device.SetEnforce(enabled=False)
  def testSetEnforce_EnforcingWithInt(self):
    with self.assertCalls(
        (self.call.device.NeedsSU(), False),
        (self.call.adb.Shell('setenforce 1'), '')):
      self.device.SetEnforce(enabled=1)
  def testSetEnforce_PermissiveWithInt(self):
    with self.assertCalls(
        (self.call.device.NeedsSU(), False),
        (self.call.adb.Shell('setenforce 0'), '')):
      self.device.SetEnforce(enabled=0)
  def testSetEnforce_EnforcingWithStr(self):
    with self.assertCalls(
        (self.call.device.NeedsSU(), False),
        (self.call.adb.Shell('setenforce 1'), '')):
      self.device.SetEnforce(enabled='1')
  def testSetEnforce_PermissiveWithStr(self):
    with self.assertCalls(
        (self.call.device.NeedsSU(), False),
        (self.call.adb.Shell('setenforce 0'), '')):
      self.device.SetEnforce(enabled='0')  # Not recommended but it works!
class DeviceUtilsTakeScreenshotTest(DeviceUtilsTest):
  """Tests for DeviceUtils.TakeScreenshot.

  A screenshot is captured with `screencap -p` into a device temp file,
  then pulled to the requested host path.
  """
  def testTakeScreenshot_fileNameProvided(self):
    with self.assertCalls(
        (mock.call.devil.android.device_temp_file.DeviceTempFile(
            self.adb, suffix='.png'),
         MockTempFile('/tmp/path/temp-123.png')),
        (self.call.adb.Shell('/system/bin/screencap -p /tmp/path/temp-123.png'),
         ''),
        self.call.device.PullFile('/tmp/path/temp-123.png',
                                  '/test/host/screenshot.png')):
      self.device.TakeScreenshot('/test/host/screenshot.png')
class DeviceUtilsGetMemoryUsageForPidTest(DeviceUtilsTest):
  """Tests for DeviceUtils.GetMemoryUsageForPid.

  Values come from two sources: the TOTAL row of `showmap <pid>` and the
  VmHWM line of /proc/<pid>/status; either source may be missing.
  """
  def setUp(self):
    super(DeviceUtilsGetMemoryUsageForPidTest, self).setUp()
  def testGetMemoryUsageForPid_validPid(self):
    with self.assertCalls(
        (self.call.device._RunPipedShellCommand(
            'showmap 1234 | grep TOTAL', as_root=True),
         ['100 101 102 103 104 105 106 107 TOTAL']),
        (self.call.device.ReadFile('/proc/1234/status', as_root=True),
         'VmHWM: 1024 kB\n')):
      # The eight showmap columns map positionally onto these keys.
      self.assertEqual(
          {
            'Size': 100,
            'Rss': 101,
            'Pss': 102,
            'Shared_Clean': 103,
            'Shared_Dirty': 104,
            'Private_Clean': 105,
            'Private_Dirty': 106,
            'VmHWM': 1024
          },
          self.device.GetMemoryUsageForPid(1234))
  def testGetMemoryUsageForPid_noSmaps(self):
    """Only VmHWM is reported when showmap cannot read smaps."""
    # NOTE(review): this fixture uses lowercase 'kb' while the validPid one
    # uses 'kB' — presumably the parser is case-insensitive; verify in
    # device_utils.GetMemoryUsageForPid.
    with self.assertCalls(
        (self.call.device._RunPipedShellCommand(
            'showmap 4321 | grep TOTAL', as_root=True),
         ['cannot open /proc/4321/smaps: No such file or directory']),
        (self.call.device.ReadFile('/proc/4321/status', as_root=True),
         'VmHWM: 1024 kb\n')):
      self.assertEquals({'VmHWM': 1024}, self.device.GetMemoryUsageForPid(4321))
  def testGetMemoryUsageForPid_noStatus(self):
    """A failed /proc/<pid>/status read just omits the VmHWM key."""
    with self.assertCalls(
        (self.call.device._RunPipedShellCommand(
            'showmap 4321 | grep TOTAL', as_root=True),
         ['100 101 102 103 104 105 106 107 TOTAL']),
        (self.call.device.ReadFile('/proc/4321/status', as_root=True),
         self.CommandError())):
      self.assertEquals(
          {
            'Size': 100,
            'Rss': 101,
            'Pss': 102,
            'Shared_Clean': 103,
            'Shared_Dirty': 104,
            'Private_Clean': 105,
            'Private_Dirty': 106,
          },
          self.device.GetMemoryUsageForPid(4321))
class DeviceUtilsDismissCrashDialogIfNeededTest(DeviceUtilsTest):
  """Tests for DeviceUtils.DismissCrashDialogIfNeeded.

  The crash dialog is detected by scanning `dumpsys window windows` for
  an 'Application Error: <package>' focused window; if found, key events
  (DPAD_RIGHT x2, ENTER) dismiss it and the dump is re-checked.

  NOTE(review): 'Pageckage' in the test method names is a typo for
  'Package'; left as-is since renaming would break test-name filters.
  """
  def testDismissCrashDialogIfNeeded_crashedPageckageNotFound(self):
    sample_dumpsys_output = '''
WINDOW MANAGER WINDOWS (dumpsys window windows)
  Window #11 Window{f8b647a u0 SearchPanel}:
    mDisplayId=0 mSession=Session{8 94:122} mClient=android.os.BinderProxy@1ba5
    mOwnerUid=100 mShowToOwnerOnly=false package=com.android.systemui appop=NONE
    mAttrs=WM.LayoutParams{(0,0)(fillxfill) gr=#53 sim=#31 ty=2024 fl=100
    Requested w=1080 h=1920 mLayoutSeq=426
    mBaseLayer=211000 mSubLayer=0 mAnimLayer=211000+0=211000 mLastLayer=211000
'''
    with self.assertCalls(
        (self.call.device.RunShellCommand(
            ['dumpsys', 'window', 'windows'], check_return=True,
            large_output=True), sample_dumpsys_output.split('\n'))):
      package_name = self.device.DismissCrashDialogIfNeeded()
      self.assertIsNone(package_name)
  def testDismissCrashDialogIfNeeded_crashedPageckageFound(self):
    sample_dumpsys_output = '''
WINDOW MANAGER WINDOWS (dumpsys window windows)
  Window #11 Window{f8b647a u0 SearchPanel}:
    mDisplayId=0 mSession=Session{8 94:122} mClient=android.os.BinderProxy@1ba5
    mOwnerUid=102 mShowToOwnerOnly=false package=com.android.systemui appop=NONE
    mAttrs=WM.LayoutParams{(0,0)(fillxfill) gr=#53 sim=#31 ty=2024 fl=100
    Requested w=1080 h=1920 mLayoutSeq=426
    mBaseLayer=211000 mSubLayer=0 mAnimLayer=211000+0=211000 mLastLayer=211000
  mHasPermanentDpad=false
  mCurrentFocus=Window{3a27740f u0 Application Error: com.android.chrome}
  mFocusedApp=AppWindowToken{470af6f token=Token{272ec24e ActivityRecord{t894}}}
'''
    with self.assertCalls(
        (self.call.device.RunShellCommand(
            ['dumpsys', 'window', 'windows'], check_return=True,
            large_output=True), sample_dumpsys_output.split('\n')),
        # Keyevents 22/22/66: DPAD_RIGHT twice, then ENTER, to hit "OK".
        (self.call.device.RunShellCommand(
            ['input', 'keyevent', '22'], check_return=True)),
        (self.call.device.RunShellCommand(
            ['input', 'keyevent', '22'], check_return=True)),
        (self.call.device.RunShellCommand(
            ['input', 'keyevent', '66'], check_return=True)),
        # Second dump confirms the dialog is gone.
        (self.call.device.RunShellCommand(
            ['dumpsys', 'window', 'windows'], check_return=True,
            large_output=True), [])):
      package_name = self.device.DismissCrashDialogIfNeeded()
      self.assertEqual(package_name, 'com.android.chrome')
class DeviceUtilsClientCache(DeviceUtilsTest):
  """Tests for DeviceUtils.GetClientCache / _ClearCache.

  Client caches are independent of the device's own cache and of each
  other, but _ClearCache empties every one of them.
  """
  def testClientCache_twoCaches(self):
    """Distinct client names get distinct caches; _ClearCache wipes all."""
    self.device._cache['test'] = 0
    cache_a = self.device.GetClientCache('ClientOne')
    cache_a['test'] = 1
    cache_b = self.device.GetClientCache('ClientTwo')
    cache_b['test'] = 2
    # Writes to client caches must not leak into the device cache.
    self.assertEqual(self.device._cache['test'], 0)
    self.assertEqual(cache_a, {'test': 1})
    self.assertEqual(cache_b, {'test': 2})
    self.device._ClearCache()
    # Clearing removes device-cache entries and empties both client caches.
    self.assertTrue('test' not in self.device._cache)
    self.assertEqual(cache_a, {})
    self.assertEqual(cache_b, {})
  def testClientCache_multipleInstances(self):
    """The same client name always yields the same (shared) cache dict."""
    first_handle = self.device.GetClientCache('ClientOne')
    first_handle['test'] = 1
    second_handle = self.device.GetClientCache('ClientOne')
    self.assertEqual(first_handle, {'test': 1})
    self.assertEqual(second_handle, {'test': 1})
    self.device._ClearCache()
    self.assertEqual(first_handle, {})
    self.assertEqual(second_handle, {})
class DeviceUtilsHealthyDevicesTest(mock_calls.TestCase):
  """Tests for the classmethod DeviceUtils.HealthyDevices.

  Covers blacklist filtering, the device_arg variants (default, None,
  string, empty list, list), and the ANDROID_SERIAL environment
  variable short-circuit that skips the `adb devices` enumeration.
  """
  def testHealthyDevices_emptyBlacklist_defaultDeviceArg(self):
    test_serials = ['0123456789abcdef', 'fedcba9876543210']
    with self.assertCalls(
        (mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
         [_AdbWrapperMock(s) for s in test_serials])):
      blacklist = mock.NonCallableMock(**{'Read.return_value': []})
      devices = device_utils.DeviceUtils.HealthyDevices(blacklist)
    for serial, device in zip(test_serials, devices):
      self.assertTrue(isinstance(device, device_utils.DeviceUtils))
      self.assertEquals(serial, device.adb.GetDeviceSerial())
  def testHealthyDevices_blacklist_defaultDeviceArg(self):
    """Blacklisted serials are excluded from the result."""
    test_serials = ['0123456789abcdef', 'fedcba9876543210']
    with self.assertCalls(
        (mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
         [_AdbWrapperMock(s) for s in test_serials])):
      blacklist = mock.NonCallableMock(
          **{'Read.return_value': ['fedcba9876543210']})
      devices = device_utils.DeviceUtils.HealthyDevices(blacklist)
    self.assertEquals(1, len(devices))
    self.assertTrue(isinstance(devices[0], device_utils.DeviceUtils))
    self.assertEquals('0123456789abcdef', devices[0].adb.GetDeviceSerial())
  def testHealthyDevices_noneDeviceArg_multiple_attached(self):
    """device_arg=None demands exactly one device; two attached raises."""
    test_serials = ['0123456789abcdef', 'fedcba9876543210']
    with self.assertCalls(
        (mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
         [_AdbWrapperMock(s) for s in test_serials]),
        (mock.call.devil.android.device_errors.MultipleDevicesError(mock.ANY),
         _MockMultipleDevicesError())):
      with self.assertRaises(_MockMultipleDevicesError):
        device_utils.DeviceUtils.HealthyDevices(device_arg=None)
  def testHealthyDevices_noneDeviceArg_one_attached(self):
    test_serials = ['0123456789abcdef']
    with self.assertCalls(
        (mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
         [_AdbWrapperMock(s) for s in test_serials])):
      devices = device_utils.DeviceUtils.HealthyDevices(device_arg=None)
    self.assertEquals(1, len(devices))
  def testHealthyDevices_noneDeviceArg_no_attached(self):
    test_serials = []
    with self.assertCalls(
        (mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
         [_AdbWrapperMock(s) for s in test_serials])):
      with self.assertRaises(device_errors.NoDevicesError):
        device_utils.DeviceUtils.HealthyDevices(device_arg=None)
  def testHealthyDevices_noneDeviceArg_multiple_attached_ANDROID_SERIAL(self):
    try:
      os.environ['ANDROID_SERIAL'] = '0123456789abcdef'
      with self.assertCalls():  # Should skip adb devices when device is known.
        device_utils.DeviceUtils.HealthyDevices(device_arg=None)
    finally:
      # Always restore the environment for the rest of the test run.
      del os.environ['ANDROID_SERIAL']
  def testHealthyDevices_stringDeviceArg(self):
    with self.assertCalls():  # Should skip adb devices when device is known.
      devices = device_utils.DeviceUtils.HealthyDevices(
          device_arg='0123456789abcdef')
    self.assertEquals(1, len(devices))
  def testHealthyDevices_EmptyListDeviceArg_multiple_attached(self):
    """device_arg=() allows any number of devices (unlike None)."""
    test_serials = ['0123456789abcdef', 'fedcba9876543210']
    with self.assertCalls(
        (mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
         [_AdbWrapperMock(s) for s in test_serials])):
      devices = device_utils.DeviceUtils.HealthyDevices(device_arg=())
    self.assertEquals(2, len(devices))
  def testHealthyDevices_EmptyListDeviceArg_ANDROID_SERIAL(self):
    try:
      os.environ['ANDROID_SERIAL'] = '0123456789abcdef'
      with self.assertCalls():  # Should skip adb devices when device is known.
        devices = device_utils.DeviceUtils.HealthyDevices(device_arg=())
    finally:
      del os.environ['ANDROID_SERIAL']
    self.assertEquals(1, len(devices))
  def testHealthyDevices_EmptyListDeviceArg_no_attached(self):
    test_serials = []
    with self.assertCalls(
        (mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
         [_AdbWrapperMock(s) for s in test_serials])):
      with self.assertRaises(device_errors.NoDevicesError):
        device_utils.DeviceUtils.HealthyDevices(device_arg=[])
  def testHealthyDevices_ListDeviceArg(self):
    """An explicit serial list wins over ANDROID_SERIAL."""
    device_arg = ['0123456789abcdef', 'fedcba9876543210']
    try:
      os.environ['ANDROID_SERIAL'] = 'should-not-apply'
      with self.assertCalls():  # Should skip adb devices when device is known.
        devices = device_utils.DeviceUtils.HealthyDevices(device_arg=device_arg)
    finally:
      del os.environ['ANDROID_SERIAL']
    self.assertEquals(2, len(devices))
class DeviceUtilsRestartAdbdTest(DeviceUtilsTest):
  """Tests for DeviceUtils.RestartAdbd.

  The restart is done by writing a script to a device temp file, sourcing
  it as root, and then waiting for the device to come back.
  """
  def testAdbdRestart(self):
    mock_temp_file = '/sdcard/temp-123.sh'
    with self.assertCalls(
        (mock.call.devil.android.device_temp_file.DeviceTempFile(
            self.adb, suffix='.sh'), MockTempFile(mock_temp_file)),
        # Script contents are not pinned here, only that a write happens.
        self.call.device.WriteFile(mock.ANY, mock.ANY),
        (self.call.device.RunShellCommand(
            ['source', mock_temp_file], check_return=True, as_root=True)),
        self.call.adb.WaitForDevice()):
      self.device.RestartAdbd()
class DeviceUtilsGrantPermissionsTest(DeviceUtilsTest):
  """Tests for DeviceUtils.GrantPermissions.

  Runtime permission grants only exist on Marshmallow+; below that, and
  for empty or blacklisted permission lists, no shell commands run.
  """
  def testGrantPermissions_none(self):
    """An empty permission list is a no-op (no mocked calls expected)."""
    self.device.GrantPermissions('package', [])
  def testGrantPermissions_underM(self):
    """Pre-M devices have no runtime permissions; nothing is executed."""
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.LOLLIPOP):
      self.device.GrantPermissions('package', ['p1'])
  def testGrantPermissions_one(self):
    permissions_cmd = 'pm grant package p1'
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.MARSHMALLOW):
      with self.assertCalls(
          (self.call.device.RunShellCommand(
              permissions_cmd, shell=True, check_return=True), [])):
        self.device.GrantPermissions('package', ['p1'])
  def testGrantPermissions_multiple(self):
    """Multiple grants are chained with && into a single shell command."""
    permissions_cmd = 'pm grant package p1&&pm grant package p2'
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.MARSHMALLOW):
      with self.assertCalls(
          (self.call.device.RunShellCommand(
              permissions_cmd, shell=True, check_return=True), [])):
        self.device.GrantPermissions('package', ['p1', 'p2'])
  def testGrantPermissions_WriteExtrnalStorage(self):
    """Granting WRITE_EXTERNAL_STORAGE implies READ_EXTERNAL_STORAGE."""
    permissions_cmd = (
        'pm grant package android.permission.WRITE_EXTERNAL_STORAGE&&'
        'pm grant package android.permission.READ_EXTERNAL_STORAGE')
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.MARSHMALLOW):
      with self.assertCalls(
          (self.call.device.RunShellCommand(
              permissions_cmd, shell=True, check_return=True), [])):
        self.device.GrantPermissions(
            'package', ['android.permission.WRITE_EXTERNAL_STORAGE'])
  def testGrantPermissions_BlackList(self):
    """Blacklisted permissions (e.g. ACCESS_MOCK_LOCATION) are skipped."""
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.MARSHMALLOW):
      self.device.GrantPermissions(
          'package', ['android.permission.ACCESS_MOCK_LOCATION'])
# NOTE(review): class name 'Deviec' is a typo for 'Device'; left unchanged
# because renaming would break existing test-name filters.
class DeviecUtilsIsScreenOn(DeviceUtilsTest):
  """Tests for DeviceUtils.IsScreenOn.

  Pre-Lollipop devices report screen state via 'mScreenOn' in dumpsys
  input_method output; Lollipop+ uses 'mInteractive'.
  """
  # Canned dumpsys grep output for each SDK level / screen state.
  _L_SCREEN_ON = ['test=test mInteractive=true']
  _K_SCREEN_ON = ['test=test mScreenOn=true']
  _L_SCREEN_OFF = ['mInteractive=false']
  _K_SCREEN_OFF = ['mScreenOn=false']
  def testIsScreenOn_onPreL(self):
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.KITKAT):
      with self.assertCalls(
          (self.call.device._RunPipedShellCommand(
              'dumpsys input_method | grep mScreenOn'), self._K_SCREEN_ON)):
        self.assertTrue(self.device.IsScreenOn())
  def testIsScreenOn_onL(self):
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.LOLLIPOP):
      with self.assertCalls(
          (self.call.device._RunPipedShellCommand(
              'dumpsys input_method | grep mInteractive'), self._L_SCREEN_ON)):
        self.assertTrue(self.device.IsScreenOn())
  def testIsScreenOn_offPreL(self):
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.KITKAT):
      with self.assertCalls(
          (self.call.device._RunPipedShellCommand(
              'dumpsys input_method | grep mScreenOn'), self._K_SCREEN_OFF)):
        self.assertFalse(self.device.IsScreenOn())
  def testIsScreenOn_offL(self):
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.LOLLIPOP):
      with self.assertCalls(
          (self.call.device._RunPipedShellCommand(
              'dumpsys input_method | grep mInteractive'), self._L_SCREEN_OFF)):
        self.assertFalse(self.device.IsScreenOn())
  def testIsScreenOn_noOutput(self):
    """Empty grep output means the state is unknown -> CommandFailedError."""
    with self.patch_call(self.call.device.build_version_sdk,
                         return_value=version_codes.LOLLIPOP):
      with self.assertCalls(
          (self.call.device._RunPipedShellCommand(
              'dumpsys input_method | grep mInteractive'), [])):
        with self.assertRaises(device_errors.CommandFailedError):
          self.device.IsScreenOn()
class DeviecUtilsSetScreen(DeviceUtilsTest):
  """Tests for DeviceUtils.SetScreen.

  SetScreen toggles the screen with KEYCODE_POWER and polls IsScreenOn
  until the requested state is observed; time.sleep is mocked out so the
  polling tests run instantly.
  """

  @mock.patch('time.sleep', mock.Mock())
  def testSetScren_alreadySet(self):
    # Screen already in the requested state: no key event should be sent.
    with self.assertCalls(
        (self.call.device.IsScreenOn(), False)):
      self.device.SetScreen(False)

  @mock.patch('time.sleep', mock.Mock())
  def testSetScreen_on(self):
    with self.assertCalls(
        (self.call.device.IsScreenOn(), False),
        (self.call.device.SendKeyEvent(keyevent.KEYCODE_POWER), None),
        (self.call.device.IsScreenOn(), True)):
      self.device.SetScreen(True)

  @mock.patch('time.sleep', mock.Mock())
  def testSetScreen_off(self):
    with self.assertCalls(
        (self.call.device.IsScreenOn(), True),
        (self.call.device.SendKeyEvent(keyevent.KEYCODE_POWER), None),
        (self.call.device.IsScreenOn(), False)):
      self.device.SetScreen(False)

  @mock.patch('time.sleep', mock.Mock())
  def testSetScreen_slow(self):
    # The hardware may lag behind the key event; SetScreen keeps polling
    # IsScreenOn until the screen actually turns off.
    with self.assertCalls(
        (self.call.device.IsScreenOn(), True),
        (self.call.device.SendKeyEvent(keyevent.KEYCODE_POWER), None),
        (self.call.device.IsScreenOn(), True),
        (self.call.device.IsScreenOn(), True),
        (self.call.device.IsScreenOn(), False)):
      self.device.SetScreen(False)
class DeviecUtilsLoadCacheData(DeviceUtilsTest):
  """Tests for DeviceUtils.LoadCacheData / DumpCacheData.

  Cached data carries a validity token; loading must fail when the token is
  missing or does not match the device's current token.
  """

  def testTokenMissing(self):
    # No token in the payload -> data cannot be validated -> rejected.
    with self.assertCalls(
        self.EnsureCacheInitialized()):
      self.assertFalse(self.device.LoadCacheData('{}'))

  def testTokenStale(self):
    # A token different from the current one is stale -> rejected.
    with self.assertCalls(
        self.EnsureCacheInitialized()):
      self.assertFalse(self.device.LoadCacheData('{"token":"foo"}'))

  def testTokenMatches(self):
    with self.assertCalls(
        self.EnsureCacheInitialized()):
      self.assertTrue(self.device.LoadCacheData('{"token":"TOKEN"}'))

  def testDumpThenLoad(self):
    # A freshly dumped cache (with a matching token) must round-trip.
    with self.assertCalls(
        self.EnsureCacheInitialized()):
      data = json.loads(self.device.DumpCacheData())
      data['token'] = 'TOKEN'
      self.assertTrue(self.device.LoadCacheData(json.dumps(data)))
if __name__ == '__main__':
  # Verbose logging plus verbosity=2 makes individual test results visible
  # when the suite is run directly.
  logging.getLogger().setLevel(logging.DEBUG)
  unittest.main(verbosity=2)
| mrtnrdl/.macdots | scripts/bin/platform-tools/systrace/catapult/devil/devil/android/device_utils_test.py | Python | unlicense | 120,907 | [
"MOE"
] | 6db6bfb9bca0d116891e3b50320e156a7b7ec7d3b8565bb2532d57a5ea5e9c88 |
import numpy as np
import matplotlib.pyplot as plt
import uncertainties
from scipy.signal import find_peaks
from scipy.optimize import curve_fit
import scipy.constants as sc
import scipy.integrate as integrate
from uncertainties import ufloat
from uncertainties import unumpy as unp
from uncertainties.unumpy import nominal_values as nomval
from uncertainties.unumpy import std_devs as std
# Loading experimental data and results of further calculations
# Detector geometry in metres: crystal radius and source-detector distance.
r = 0.5*45*10**(-3)
L = (73.5+15)*10**(-3)
# Presumably the solid-angle fraction subtended by the detector (cone
# formula) -- TODO confirm against the lab-report derivation.
Omega = 0.5 * ( 1- L/np.sqrt(L**2+r**2))
# Raw spectrum of the unknown sample: counts per channel.
C_u1 = np.genfromtxt('2018-12-10_Nitschke_Pape/Probe_11.Spe', unpack = True)
# Europium calibration peaks and their efficiencies from a previous analysis.
Peaks_Eu, Q_Eu = np.genfromtxt('EuropiumQ.txt', unpack = True)
Channels = np.linspace(0,len(C_u1[:3000])-1, len(C_u1[:3000]))
# Fit parameters and covariances of the energy and efficiency calibrations.
params_energy, covariance_energy_0, covariance_energy_1, params_Q, covariance_Q_0, covariance_Q_1= np.genfromtxt('Europium.txt', unpack = True)
covariance_energy = np.array([covariance_energy_0, covariance_energy_1])
errors_energy = np.sqrt(np.diag(covariance_energy))
covariance_Q = np.array([covariance_Q_0,covariance_Q_1])
errors_Q = np.sqrt(np.diag(covariance_Q))
def Energy(C):
    # Linear channel -> energy calibration with uncertainty propagation,
    # using the fit parameters loaded from 'Europium.txt'.
    return ufloat(params_energy[0], errors_energy[0])*C + ufloat(params_energy[1], errors_energy[1])
def Gauss(x, A, xmu, sigma, B):
    """Gaussian of amplitude A centred at xmu on a constant background B."""
    z = (x - xmu) / sigma
    return A * np.exp(-0.5 * z**2) + B
def Gauss_Ufloat(x, A, xmu, sigma):
    # Background-free Gaussian built with unumpy so uncertainties propagate
    # through the exponential.
    return A * unp.exp(-0.5*(x-xmu)**2/sigma**2)
def AreaGaus(A, sigma):
    """Analytic area under a Gaussian: sqrt(2*pi) * sigma * A."""
    return A * sigma * np.sqrt(2.0 * np.pi)
def Efficiency(E):
    # Power-law detection efficiency Q(E) = a * E**b with uncertainty
    # propagation, using the fit parameters loaded from 'Europium.txt'.
    return ufloat(params_Q[0], errors_Q[0])*E**ufloat(params_Q[1], errors_Q[1])
# ---------------------------------------------------------------------------
# Peak search and Gaussian fits for the unknown sample's spectrum
# ---------------------------------------------------------------------------
Spektrum = C_u1[:3000]
# total measurement time in seconds (normalises peak areas to count rates)
tges = 4281
Peaks = find_peaks(Spektrum, height = 140)[0]
# one small peak around channel 550-570 is below the global height threshold
Peaks_little = find_peaks(Spektrum[550:570], height = 50)[0]
Peaks = np.insert(Peaks, 3, Peaks_little+550)
plt.clf()
plt.hist(unp.nominal_values(Energy(np.arange(0, len(Spektrum[0:1500]), 1))),
         bins=unp.nominal_values(Energy(np.linspace(0, len(Spektrum[0:1500]), len(Spektrum[0:1500])))),
         weights=Spektrum[0:1500], label='Spektrum')
plt.yscale('log')
plt.plot(nomval(Energy(Peaks)), Spektrum[Peaks], '.',
         markersize=4, label='Gauß-Peaks', color='C1', alpha=0.8)
plt.xlim(0,500)
plt.ylabel('Zählungen pro Energie')
plt.xlabel('E / keV')
plt.legend()
#plt.show()
plt.savefig('Plots/unbekannt1.pdf')
Peaks_Energy = Energy(Peaks)
# literature line energies in keV for comparison with the fitted peaks
Energy_ba = np.array([53.16, 80.997, 160.61, 223.25, 276.398, 302.85, 356.02, 383.85 ]) # removed the 2nd 160.61 peak
#print(Peaks_Energy-Energy_ba)
# Fit a Gaussian plus constant background to +-30 channels around each peak.
Params_u1 = []
errors_u1 = []
for n in Peaks:
    Params, covariance = curve_fit(Gauss, Channels[n-30:n+30], Spektrum[n-30:n+30], p0 = [C_u1[n], n, 1, 0])
    Params_u1.append(Params.tolist())
    errors = np.sqrt(np.diag(covariance))
    errors_u1.append(errors.tolist())
# Visual check of each fitted peak.
for i,n in enumerate(Peaks):
    # NOTE(review): np.int is deprecated/removed in modern NumPy; use int().
    l_u = np.int(Channels[n-30])
    l_o = np.int(Channels[n+30])
    plt.clf()
    plt.hist(unp.nominal_values(Energy(np.arange(l_u, l_o, 1))),
             bins=unp.nominal_values(Energy(np.linspace(l_u, l_o, len(Spektrum[n-30:n+30])))),
             weights=Spektrum[n-30:n+30], label='Spektrum')
    Channel_Gauss = np.linspace(n-30,n+30,1000)
    plt.plot(unp.nominal_values(Energy(Channel_Gauss)), Gauss(Channel_Gauss,*Params_u1[i]))
    plt.show()
# Collect the fit parameters, attaching their 1-sigma errors as ufloats.
Peaks_mittel = np.round(np.asarray(Params_u1)[:,1],0)
Amplitudes = np.asarray(Params_u1)[:,0]
Amplitudes_ufloat = np.asarray([ufloat(n, np.asarray(errors_u1)[i,0]) for i,n in enumerate(np.asarray(Params_u1)[:,0])])
Means_ufloat = np.asarray([ufloat(n, np.asarray(errors_u1)[i,1]) for i,n in enumerate(np.asarray(Params_u1)[:,1])])
sigmas = np.asarray(Params_u1)[:,2]
sigmas_ufloat = np.asarray([ufloat(n, np.asarray(errors_u1)[i,2]) for i,n in enumerate(np.asarray(Params_u1)[:,2])])
Area_Params = np.array([[n,sigmas[i]] for i,n in enumerate(Amplitudes)])
Area_params_ufloat = np.array([[n,sigmas_ufloat[i]] for i,n in enumerate(Amplitudes_ufloat)])
Constants_ufloat = np.asarray([ufloat(n, np.asarray(errors_u1)[i,3]) for i,n in enumerate(np.asarray(Params_u1)[:,3])])
print("--- Find Peaks and gaussian fit---")
print(f"Channel Peaks: {np.round(Peaks_mittel,0)}")
#print(f"Energy Peaks: {Energy(np.round(Peaks_mittel,0))}")
print(f"Energy Literature: {Energy_ba}", '\n')
# Peak areas normalised by the measurement time -> count rates.
Area = AreaGaus(Area_Params[:,0], Area_Params[:,1])
Area_ufloat = AreaGaus(Area_params_ufloat[:,0], Area_params_ufloat[:,1])
Area_norm = Area/tges
Area_norm_ufloat = Area_ufloat/tges
print("-- Fit Parameter --")
print(f"Amplituden: {Amplitudes_ufloat}")
print(f"Means: {Energy(Means_ufloat)}")
print(f"Sigmas: {sigmas_ufloat}")
print(f"Constants: {Constants_ufloat}", '\n')
print("--- Calculating the activity ---")
# Detector geometry (same values as defined at the top of the script).
r = 0.5*45*10**(-3)
L = (73.5+15)*10**(-3)
Omega = 0.5 * ( 1- L/np.sqrt(L**2+r**2))
# emission probabilities of the identified lines
W = np.asarray([0.022, 0.341, 0.006, 0.005, 0.072, 0.183, 0.621, 0.089]) # second 0.06 removed
Q = Efficiency(Energy(np.round(Peaks_mittel,0)))
# Activity per line: A = rate / (W * Q * Omega)
Aktivität = np.array([Area_norm[i]/(W[i]*n*Omega) for i,n in enumerate(Q)])
print(f"emission probability: {W}")
print(f"Area under Gaussian Fit: {Area_ufloat}")
print(f"Efficiency: {Q}", '\n')
print(f"resulting acitivity: {Aktivität}")
A_all = sum(Aktivität)/len(Aktivität)#ufloat(np.mean(nomval(Aktivität)),np.std(std(Aktivität)))
A_4 = sum(Aktivität[4:])/len(Aktivität[4:])#ufloat(np.mean(nomval(Aktivität[1:4])),np.std(std(Aktivität[1:4])))
print(f"Mean with all values: {nomval(A_all)}, {std(A_all)}")
print(f"Mean without 1st: {nomval(A_4)}, {std(A_4)}")
| smjhnits/Praktikum_TU_D_16-17 | Fortgeschrittenenpraktikum/Protokolle/V18_Germaniumdetektor/Python/unbekannt1.py | Python | mit | 5,412 | [
"Gaussian"
] | 655e7c008b2e1e3d5560bf2712bf68e5455ab655d519e20564e5cc34a0745b8e |
from uaperrors import StepError
import sys
import os
from logging import getLogger
from abstract_step import AbstractStep
logger = getLogger('uap_logger')
class HtSeqCount(AbstractStep):
    '''
    The htseq-count script counts the number of reads overlapping a feature.
    Input needs to be a file with aligned sequencing reads and a list of genomic
    features. For more information see:

    http://www-huber.embl.de/users/anders/HTSeq/doc/count.html
    '''

    def __init__(self, pipeline):
        """Declare connections, required tools and configuration options."""
        super(HtSeqCount, self).__init__(pipeline)

        self.set_cores(2)

#        self.add_connection('in/alignments')
        # the BAM files
        self.add_connection('in/alignments')
        # the feature file provided by another step (e.g. cuffmerge)
        self.add_connection('in/features', optional=True, format='gtf',
                            description='reference assembly'
                            )
        # the counts per alignment
        self.add_connection('out/counts')

        self.require_tool('dd')
        self.require_tool('pigz')
        self.require_tool('htseq-count')
        self.require_tool('samtools')

        # Path to external feature file if necessary
        self.add_option('feature-file', str, optional=True)

        # [Options for 'htseq-count':]
        self.add_option('order', str, choices=['name', 'pos'],
                        optional=False)
        self.add_option('stranded', str, choices=['yes', 'no', 'reverse'],
                        optional=False)
        self.add_option('a', int, optional=True)
        self.add_option('type', str, default='exon', optional=True)
        self.add_option('idattr', str, default='gene_id', optional=True)
        self.add_option('mode', str, choices=['union', 'intersection-strict',
                                              'intersection-nonempty'],
                        default='union', optional=True)

        # [Options for 'dd':]
        self.add_option('dd-blocksize', str, optional=True, default="2M")
        self.add_option('pigz-blocksize', str, optional=True, default="2048")
        self.add_option('threads', int, default=2, optional=True,
                        description="start <n> threads (default:2)")

    def runs(self, cc):
        """Declare one run per 'in/alignments' run.

        Each run builds a shell pipeline
        dd -> [pigz -d] -> [samtools view] -> htseq-count
        and registers the htseq-count output as the 'counts' file.
        """
        # Compile the list of options
        options = ['order', 'stranded', 'a', 'type', 'idattr', 'mode']
        set_options = [option for option in options if
                       self.is_option_set_in_config(option)]
        option_list = list()
        for option in set_options:
            # Boolean options become bare flags; everything else --opt=value.
            if isinstance(self.get_option(option), bool):
                if self.get_option(option):
                    option_list.append('--%s' % option)
            else:
                option_list.append(
                    '--%s=%s' % (option, str(self.get_option(option))))
        if 'threads' in set_options:
            self.set_cores(self.get_option('threads'))

        # look for reference assembly in in-connections
        option_ref_assembly = self.get_option('feature-file')
        if option_ref_assembly is not None:
            option_ref_assembly = os.path.abspath(option_ref_assembly)
            if not os.path.isfile(option_ref_assembly):
                raise StepError(self, '%s is no file.' %
                                self.get_option('feature-file'))
        ref_assembly = cc.look_for_unique('in/features', option_ref_assembly)
        ref_per_run = cc.all_runs_have_connection('in/features')
        allignment_runs = cc.get_runs_with_connections('in/alignments')
        for run_id in allignment_runs:
            input_paths = cc[run_id]['in/alignments']
            if ref_per_run:
                # all runs come with their own reference assembly
                ref_assembly = cc['in/features'][0]
                if option_ref_assembly is None:
                    # include the file in the dependencies
                    input_paths.append(ref_assembly)

            # Is the alignment gzipped?
            root, ext = os.path.splitext(input_paths[0])
            is_gzipped = True if ext in ['.gz', '.gzip'] else False
            # Is the alignment in SAM or BAM format?
            if is_gzipped:
                # strip the compression suffix to inspect the real extension
                root, ext = os.path.splitext(root)
            is_bam = False
            is_sam = False
            if ext in ['.bam']:
                is_bam = True
            elif ext in ['.sam']:
                is_sam = True
            else:
                raise StepError(
                    self, "Input file not in [SB]am format: %s" %
                    input_paths[0])
            if not (bool(is_bam) ^ bool(is_sam)):
                raise StepError(
                    self,
                    "Alignment file '%s' is neither SAM nor BAM "
                    "format" %
                    input_paths[0])
            # NOTE(review): alignments_path is never used below.
            alignments_path = input_paths[0]

            with self.declare_run(run_id) as run:
                with run.new_exec_group() as exec_group:
                    with exec_group.add_pipeline() as pipe:
                        # 1. Read alignment file in 4MB chunks
                        dd_in = [self.get_tool('dd'),
                                 'ibs=%s' % self.get_option('dd-blocksize'),
                                 'if=%s' % input_paths[0]]
                        pipe.add_command(dd_in)

                        if is_gzipped:
                            # 2. Uncompress file to STDOUT
                            pigz = [self.get_tool('pigz'),
                                    '--decompress',
                                    '--blocksize',
                                    self.get_option('pigz-blocksize'),
                                    '--processes',
                                    str(self.get_cores()),
                                    '--stdout']
                            pipe.add_command(pigz)

                        # 3. Use samtools to generate SAM output
                        if is_bam:
                            samtools = [self.get_tool('samtools'), 'view',
                                        '-']
                            pipe.add_command(samtools)

                        # 4. Count reads with htseq-count
                        htseq_count = [
                            self.get_tool('htseq-count')
                        ]
                        htseq_count.extend(option_list)
                        htseq_count.extend(['--format=sam'])
                        htseq_count.extend(['-', ref_assembly])
                        # sys.stderr.write("hts-cmd: %s\n" % htseq_count)
                        pipe.add_command(
                            htseq_count,
                            stdout_path=run.add_output_file(
                                'counts',
                                '%s-htseq_counts.txt' % run_id,
                                input_paths
                            )
                        )
| kmpf/uap | include/steps/htseq_count.py | Python | gpl-3.0 | 6,953 | [
"HTSeq"
] | b7d74e58b17ff81e42e92d311e97ec18beb840caad04d40f7dca6121b2b76267 |
from __future__ import print_function
import re
splitExpr = re.compile('[\t\ ]')  # splits an input line on single tabs or spaces
from rdkit import Chem
def runit(fName):
    """Try to parse every SMILES in *fName* with RDKit and report the counts.

    Each non-empty line is expected to hold at least two tab/space separated
    fields, with the SMILES string in the second one.

    :param fName: path of the text file to scan
    """
    nFailed = 0
    nPassed = 0
    nTried = 0
    # Read everything up front so the file handle is released promptly
    # (the original never closed it).
    with open(fName, 'r') as inFile:
        inLines = inFile.readlines()
    for line in inLines:
        if not line:
            continue
        fields = splitExpr.split(line)
        if len(fields) < 2:
            # Malformed line: the original raised an IndexError here.
            print('\tline: %s is malformed' % (repr(line)))
            continue
        # rstrip handles both '\n' and '\r\n' endings in one step.
        smi = fields[1].rstrip('\r\n')
        nTried += 1
        m = Chem.MolFromSmiles(smi)
        if m:
            nPassed += 1
        else:
            print('\t%s failed' % repr(smi))
            print('\tline: %s' % (repr(line)))
            nFailed += 1
    print('%d of %d passed' % (nPassed, nTried))
if __name__ == '__main__':
  import sys

  # Default input file; an explicit path on the command line overrides it.
  fName = sys.argv[1] if len(sys.argv) > 1 else 'ntp_smiles.txt'
  runit(fName)
| jandom/rdkit | Code/GraphMol/Wrap/test_data/do_smiles.py | Python | bsd-3-clause | 780 | [
"RDKit"
] | c84f6a890b3dd1613cca1a558892d8385540b644c1e8cbc69b16b9d2a28a9daa |
import numpy as np
import os
def XMLtoCSV(XMLinput):
    """
    Transform the XML file that comes out of the electronic structure
    calculations into two CSV files.

    X.csv is the 'X part' of the data, one sample per line in the format:
    atom label (string), x (float), y (float), z (float), ... for each atom.
    Y.csv is the 'Y part', one energy (float) per line.

    :XMLinput: an XML file obtained from grid electronic structure calculations
    """
    # 'with' guarantees the handles are flushed and closed (the original
    # leaked all three); next() replaces the Python-2-only .next().
    with open(XMLinput, 'r') as inputFile, \
         open('X.csv', 'w') as fileX, \
         open('Y.csv', 'w') as fileY:
        # The molecule is described in the <cml:molecule>...</cml:molecule>
        # block; each configuration becomes one line in the CSV files.
        for line in inputFile:
            data = []
            if "<cml:molecule>" in line:
                # Skip the three wrapper lines before the atom entries.
                for i in range(3):
                    line = next(inputFile)
                while "</cml:atomArray>" not in line:
                    indexLab = line.find("elementType=")
                    indexX = line.find("x3=")
                    indexY = line.find("y3=")
                    indexYend = line.find("\n")
                    indexZ = line.find("z3=")
                    indexZend = line.find("/>")
                    if indexLab >= 0:
                        # single-character element label after elementType="
                        data.append(line[indexLab + 13])
                        data.append(line[indexX + 4: indexY - 2])
                        data.append(line[indexY + 4: indexYend - 1])
                    if indexZ >= 0:
                        data.append(line[indexZ + 4: indexZend - 1])
                    line = next(inputFile)
                for i in range(len(data)):
                    fileX.write(data[i])
                    fileX.write(",")
                fileX.write("\n")
            if '<property name="Energy"' in line:
                line = next(inputFile)
                indexEn1 = line.find("value")
                indexEn2 = line.find("/>")
                energy = float(line[indexEn1 + 7:indexEn2 - 1])
                fileY.write(str(energy) + "\n")
    return None
def XYZtoCSV(XYZinput):
    """
    Transform the XYZ file that comes out of VR into two CSV files.

    X.csv is the 'X part' of the data, one sample per line in the format:
    atom label (string), x (float), y (float), z (float), ... for each atom.
    Y.csv is the 'Y part', one energy (float) per line.

    Note: this is specific to a file containing C, H, N as the atoms; the
    'N' line is treated as the last atom of each configuration.

    :XYZinput: an XYZ file obtained from VR
    """
    # 'with' guarantees the handles are flushed and closed; the original
    # never closed them, so the last buffered lines could be lost.
    with open(XYZinput, 'r') as inputFile, \
         open('X.csv', 'w') as fileX, \
         open('Y.csv', 'w') as fileY:
        isFirstLine = True
        n_atoms = 0  # parsed from the first line; currently unused downstream
        for line in inputFile:
            if isFirstLine:
                n_atoms = int(line)
                isFirstLine = False
            index1 = line.find("Energy")
            if index1 >= 0:
                index2 = line.find("(hartree)")
                energy = float(line[index1 + 8:index2 - 1])
                fileY.write(str(energy))
                fileY.write("\n")
            # C and H entries extend the current sample's line in X.csv...
            if line[0] == "C" or line[0] == "H":
                line = line.replace("\n", "")
                line = line.replace("\t", ",")
                fileX.write(line)
                fileX.write(",")
            # ...while the N entry terminates it.
            if line[0] == "N":
                line = line.replace("\n", "")
                line = line.replace("\t", ",")
                fileX.write(line)
                fileX.write("\n")
def extractMolpro(MolproInput):
    """
    Extract the geometry, the energy and the partial charges from one
    Molpro .out file.

    :MolproInput: path of the molpro .out file (string)
    :return:
        :rawData: list of strings, atom label followed by its coordinates -
            example ['C', '0.1', '0.1', '0.1', ...]
        :ene: value of the energy (string, "0" if not found)
        :partialCh: list of strings, atom label followed by its partial
            charge - example ['C', '6.36', 'H', ...]
    """
    rawData = []
    ene = "0"
    partialCh = []
    # 'with' guarantees the handle is closed (the original leaked it);
    # next() replaces the Python-2-only .next().
    with open(MolproInput, 'r') as inputFile:
        for line in inputFile:
            # The geometry: 7 atom lines right after the "geometry={" keyword.
            if "geometry={" in line:
                for i in range(7):
                    line = next(inputFile)
                    line = line.strip()
                    lineSplit = line.split(" ")
                    for j in range(len(lineSplit)):
                        rawData.append(lineSplit[j])
            # The energy: two lines below the keyword "Final beta occupancy:".
            elif "Final beta occupancy:" in line:
                line = next(inputFile)
                line = next(inputFile)
                line = line.strip()
                ene = line[len("!RKS STATE 1.1 Energy"):].strip()
            elif "Total charge composition:" in line:
                # Two header lines, then one line per atom (7 atoms).
                line = next(inputFile)
                line = next(inputFile)
                for i in range(7):
                    line = next(inputFile)
                    lineSplit = line.rstrip().split(" ")
                    # list() keeps Python 2's list semantics under Python 3,
                    # where filter() returns a lazy iterator.
                    lineSplit = list(filter(None, lineSplit))
                    partialCh.append(lineSplit[1])
                    partialCh.append(lineSplit[-2])
    return rawData, ene, partialCh
def list_files(dir, key):
    """
    Walk through a directory and collect the files whose name contains a
    particular string.

    :dir: path of the directory to explore (string)
    :key: string to look for in file names (string)
    :return: list of "subdir/filename" paths whose basename contains *key*
    """
    r = []  # List of matching files
    # os.walk already visits every sub-directory top-down; the original
    # walked each sub-directory a second time and relied on the
    # Python-2-only .next() to grab its file list.
    for subdir, _dirs, files in os.walk(dir):
        for name in files:
            if key in name:
                r.append(subdir + "/" + name)
    return r
def MolproToCSV(directory, key):
    """
    Extract all geometries, energies and partial charges from the Molpro
    .out files in *directory* whose file name contains *key*.

    The geometries are written to X.csv (one geometry per line), the
    energies to Y.csv (one per line) and the partial charges to Q.csv.

    :directory: path to the directory containing the Molpro .out files (string)
    :key: string to look for in the file names (string)
    """
    # 'with' closes the output files (the original leaked all three handles).
    with open('X.csv', 'w') as fileX, \
         open('Y.csv', 'w') as fileY, \
         open('Q.csv', 'w') as fileZ:
        # Obtaining the list of files to mine
        fileList = list_files(directory, key)
        for item in fileList:
            # Extracting the geometry and the energy from a Molpro out file
            geom, ene, partialCh = extractMolpro(item)
            # 28 = 7 atoms * 4 fields and 14 = 7 atoms * 2 fields; anything
            # else means the file was truncated or could not be parsed.
            if len(geom) != 28 or ene == "0" or len(partialCh) != 14:
                # print() works on both Python 2 and 3 (the original used
                # Python-2-only print statements).
                print("The following file couldn't be read properly:")
                print(item + "\n")
                continue
            for i in range(len(geom)):
                fileX.write(geom[i])
                fileX.write(",")
            fileX.write("\n")
            fileY.write(ene + "\n")
            for i in range(len(partialCh)):
                fileZ.write(partialCh[i])
                fileZ.write(",")
            fileZ.write("\n")
def loadX(fileX):
    """
    Load the geometries from a .csv file that contains one configuration of
    the system per line, in the format "C,0.1,0.1,0.1,H,0.2,0.2,0.2...".

    The following functions generate .csv files in the correct format:
    1. XMLtoCSV
    2. XYZtoCSV
    3. MolproToCSV

    For a sample with 3 hydrogen atoms the returned value looks like:
    [['H',-0.5,0.0,0.0,'H',0.5,0.0,0.0], ['H',-0.3,0.0,0.0,'H',0.3,0.0,0.0]]

    :fileX: the .csv file containing the geometries of the system (string)
    :return: a list of lists with labels (str) and coordinates (float)
    """
    if fileX[-4:] != ".csv":
        # print() works on both Python 2 and 3.
        print("Error: the file extension is not .csv")
        quit()
    matrixX = []
    # 'with' guarantees the handle is closed even if a parse error occurs.
    with open(fileX, 'r') as inputFile:
        for line in inputFile:
            line = line.replace(",\n", "")
            listLine = line.split(",")
            # Every atom occupies 4 fields: label, x, y, z -> floats.
            for i in range(0, len(listLine) - 1, 4):
                for j in range(3):
                    listLine[i + j + 1] = float(listLine[i + j + 1])
            matrixX.append(listLine)
    return matrixX
def loadY(fileY):
    """
    Load the energies of the system from a .csv file with one energy per
    line.

    :fileY: the .csv file containing the energies of the system (string)
    :return: numpy array of shape (n_samples, 1)
    """
    # Checking that the input file has the correct .csv extension
    if fileY[-4:] != ".csv":
        # print() works on both Python 2 and 3.
        print("Error: the file extension is not .csv")
        quit()
    # 'with' guarantees the handle is closed even if a parse error occurs.
    with open(fileY, 'r') as inputFile:
        y_list = [float(line) for line in inputFile]
    matrixY = np.asarray(y_list).reshape((len(y_list), 1))
    return matrixY
def loadPd(fileName):
    """
    Load a .csv file generated after processing the original CSV files with
    pandas.

    Each line holds one configuration in the format
    "C,0.1,0.1,0.1,H,0.2,0.2,0.2..." followed by two energy values computed
    at two levels of theory, the worse of the two first.

    For a sample with 3 hydrogen atoms the geometry part looks like:
    [['H',-0.5,0.0,0.0,'H',0.5,0.0,0.0], ['H',-0.3,0.0,0.0,'H',0.3,0.0,0.0]]

    :fileName: the .csv file with geometries and two energies per line
    :return:
        :matrixX: a list of lists with labels (str) and coordinates (float)
        :matrixY: numpy array of energy differences, shape (n_samples,)
    """
    if fileName[-4:] != ".csv":
        # print() works on both Python 2 and 3.
        print("Error: the file extension is not .csv")
        quit()
    matrixX = []
    matrixY = []
    with open(fileName, 'r') as inputFile:
        isFirstLine = True
        for line in inputFile:
            if isFirstLine:
                # Skip the CSV header row (next() replaces the
                # Python-2-only .next()).
                line = next(inputFile)
                isFirstLine = False
            line = line.replace("\n", "")
            listLine = line.split(",")
            # Last two fields are the energies; the first field is an index.
            ene = [float(value) for value in listLine[-2:]]
            geom = listLine[1:-2]
            # Energy difference: better level of theory minus worse one.
            matrixY.append(ene[1] - ene[0])
            # Every atom occupies 4 fields: label, x, y, z -> floats.
            for i in range(0, len(geom) - 1, 4):
                for j in range(3):
                    geom[i + j + 1] = float(geom[i + j + 1])
            matrixX.append(geom)
    matrixY = np.asarray(matrixY)
    return matrixX, matrixY
def loadPd_q(fileName):
    """
    Load the 'clean data set' .csv generated with pandas.

    The row layout is: index, the 21 geometry columns (the header names the
    atom label of each coordinate, e.g. H1x, H1y, ...), the partial charges
    and finally two energy values.

    **Note**: This is specific to the CH4CN system!

    :fileName: .csv file (string)
    :return:
        :matrixX: a list of lists with labels (str) and coordinates (float)
        :matrixY: numpy array of energy differences (floats), shape (n_samples,)
        :matrixQ: list of numpy arrays of partial charges, (n_samples, n_atoms)
    """
    if fileName[-4:] != ".csv":
        # print() works on both Python 2 and 3.
        print("Error: the file extension is not .csv")
        quit()
    # Lists that will contain the data
    matrixX = []
    matrixY = []
    matrixQ = []
    # 'with' guarantees the handle is closed (the original never closed it).
    with open(fileName, 'r') as inputFile:
        isFirstLine = True
        for line in inputFile:
            if isFirstLine:
                # Skip the CSV header row (next() replaces the
                # Python-2-only .next()).
                line = next(inputFile)
                isFirstLine = False
            line = line.replace("\n", "")
            listLine = line.split(",")
            matrixX.append(extractGeom(listLine))
            matrixY.append(extractEneDiff(listLine))
            matrixQ.append(extractQ(listLine))
    matrixY = np.asarray(matrixY)
    return matrixX, matrixY, matrixQ
def extractGeom(lineList):
    """
    Build the labelled geometry of one sample of the clean data set.

    Used by loadPd_q. The 21 coordinate fields sit in columns 1..21 of the
    CSV row; the atom ordering of the CH4CN system is fixed.

    :lineList: one CSV row split into string fields
    :return: flat list ['C', x, y, z, 'H', x, y, z, ...] with float coords
    """
    coords = lineList[1:22]
    labels = ("C", "H", "H", "H", "H", "C", "N")
    geometry = []
    for idx, label in enumerate(labels):
        geometry.append(label)
        triplet = coords[3 * idx:3 * idx + 3]
        geometry.extend(float(value) for value in triplet)
    return geometry
def extractEneDiff(lineList):
    """
    Compute the energy difference for one row of the clean data set.

    Used by loadPd_q. The last two fields of the row hold the two energies,
    the worse level of theory first.

    :lineList: one CSV row split into string fields
    :return: energy difference (float), better level minus worse level
    """
    low, high = lineList[-2], lineList[-1]
    return float(high) - float(low)
def extractQ(lineList):
    """
    Extract the partial charges from one row of the clean data set.

    Used by loadPd_q. The charge fields sit between the geometry columns
    and the two trailing energy columns (columns 22..-3).

    :lineList: one CSV row split into string fields
    :return: numpy array of partial charges, one float per atom
    """
    charges = [float(field) for field in lineList[22:-2]]
    return np.asarray(charges)
def CSVtoTew(CSVfile):
    """
    Convert the 'clean data set' .csv (geometry columns, partial charges,
    then two energy values per row) into a monolithic file that can be used
    to train the Tew method.

    :CSVfile: the CSV file with the data
    :return: None
    """
    # NOTE(review): the output path is hard-coded and machine-specific;
    # consider turning it into a parameter.
    outputPath = "/Users/walfits/Repositories/trainingdata/TewDescriptor/monolithic.dat"
    # 'with' closes both handles (the original leaked them); next()
    # replaces the Python-2-only .next().
    with open(CSVfile, 'r') as inputFile, open(outputPath, "w") as outputFile:
        isFirstLine = True
        for line in inputFile:
            if isFirstLine:
                # Skip the CSV header row.
                line = next(inputFile)
                isFirstLine = False
            line = line.strip()
            lineSplit = line.split(",")
            writeToMono(outputFile, lineSplit)
def writeToMono(outFile, data):
    """
    Append one sample of the clean CSV data set to a monolithic Tew
    trajectory file.

    Used by CSVtoTew. Writes an "energy xyz" header, the energy difference
    and then one tab-separated x/y/z line per atom (7 atoms).

    :outFile: writable file object for the monolithic trajectory file
    :data: one CSV row split into string fields (geometry in columns 1..21,
        the two energies in the last two columns, worse level first)
    :return: None
    """
    # Same sign convention as extractEneDiff/loadPd (better level minus
    # worse level); the original computed data[-2] - data[-1], i.e. the
    # negated difference.
    ene = float(data[-1]) - float(data[-2])
    xyz = data[1:22]
    outFile.write("energy xyz\n")
    outFile.write(str(ene) + "\n")
    for i in range(7):
        for j in range(3):
            # 3 * i + j walks atom i's x/y/z fields; the original's
            # xyz[i + j] re-read overlapping fields and never got past
            # index 8, so most coordinates were wrong.
            outFile.write("\t" + str(xyz[3 * i + j]))
        outFile.write("\n")
| RobertArbon/YAMLP | SciFlow/ImportData.py | Python | mit | 16,678 | [
"Molpro"
] | 6511fc1f78e24d5f7ad2eb767f857e568c7eac5e7ad140b5277655a188905a84 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Loreto Parisi <loretoparisi@gmail.com>
# Copyright (C) 2016 Silvio Olivastri <silvio.olivastri@gmail.com>
# Copyright (C) 2016 Radim Rehurek <radim@rare-technologies.com>
"""This script allows converting word-vectors from word2vec format into Tensorflow 2D tensor and metadata format.
This script used for for word-vector visualization on `Embedding Visualization <http://projector.tensorflow.org/>`_.
How to use
----------
#. Convert your word-vector with this script (for example, we'll use model from
`gensim-data <https://rare-technologies.com/new-download-api-for-pretrained-nlp-models-and-datasets-in-gensim/>`_) ::
python -m gensim.downloader -d glove-wiki-gigaword-50 # download model in word2vec format
python -m gensim.scripts.word2vec2tensor -i ~/gensim-data/glove-wiki-gigaword-50/glove-wiki-gigaword-50.gz \
-o /tmp/my_model_prefix
#. Open http://projector.tensorflow.org/
#. Click "Load Data" button from the left menu.
#. Select "Choose file" in "Load a TSV file of vectors." and choose "/tmp/my_model_prefix_tensor.tsv" file.
#. Select "Choose file" in "Load a TSV file of metadata." and choose "/tmp/my_model_prefix_metadata.tsv" file.
#. ???
#. PROFIT!
For more information about TensorBoard TSV format please visit:
https://www.tensorflow.org/versions/master/how_tos/embedding_viz/
Command line arguments
----------------------
.. program-output:: python -m gensim.scripts.word2vec2tensor --help
:ellipsis: 0, -7
"""
import os
import sys
import logging
import argparse
import gensim
logger = logging.getLogger(__name__)
def word2vec2tensor(word2vec_model_path, tensor_filename, binary=False):
    """Convert file in Word2Vec format and writes two files 2D tensor TSV file.

    File "tensor_filename"_tensor.tsv contains word-vectors (one vector per
    line, tab-separated), "tensor_filename"_metadata.tsv contains the
    corresponding words, in the same order.

    Parameters
    ----------
    word2vec_model_path : str
        Path to file in Word2Vec format.
    tensor_filename : str
        Prefix for output files.
    binary : bool, optional
        True if input file in binary format.

    """
    model = gensim.models.KeyedVectors.load_word2vec_format(word2vec_model_path, binary=binary)
    outfiletsv = tensor_filename + '_tensor.tsv'
    outfiletsvmeta = tensor_filename + '_metadata.tsv'

    with open(outfiletsv, 'w+') as file_vector:
        with open(outfiletsvmeta, 'w+') as file_metadata:
            for word in model.index2word:
                # NOTE(review): to_utf8 returns bytes; concatenating bytes and
                # writing them to a text-mode handle works under Python 2 only
                # -- confirm before running with Python 3.
                file_metadata.write(gensim.utils.to_utf8(word) + gensim.utils.to_utf8('\n'))
                vector_row = '\t'.join(str(x) for x in model[word])
                file_vector.write(vector_row + '\n')

    logger.info("2D tensor file saved to %s", outfiletsv)
    logger.info("Tensor metadata file saved to %s", outfiletsvmeta)
if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)s - %(module)s - %(levelname)s - %(message)s', level=logging.INFO)
    # The module docstring doubles as the --help description; the slice drops
    # the trailing command-line-arguments section.
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__[:-138])
    parser.add_argument("-i", "--input", required=True, help="Path to input file in word2vec format")
    parser.add_argument("-o", "--output", required=True, help="Prefix path for output files")
    parser.add_argument(
        "-b", "--binary", action='store_const', const=True, default=False,
        help="Set this flag if word2vec model in binary format (default: %(default)s)"
    )
    args = parser.parse_args()

    logger.info("running %s", ' '.join(sys.argv))
    word2vec2tensor(args.input, args.output, args.binary)
    logger.info("finished running %s", os.path.basename(sys.argv[0]))
| mattilyra/gensim | gensim/scripts/word2vec2tensor.py | Python | lgpl-2.1 | 3,735 | [
"VisIt"
] | 00d40be0011033ef921bd8331594750922156a7ce4ea7c01f79fbf7c972f30f7 |
#!/usr/bin/env python
"""
A linting tool to check for xss vulnerabilities.
"""
from __future__ import print_function

import argparse
import ast
import bisect
import os
import re
import sys
import textwrap

from enum import Enum
class StringLines(object):
    """
    StringLines provides utility methods to work with a string in terms of
    lines. As an example, it can convert an index into a line number or column
    number (i.e. index into the line).

    Line numbers and column numbers are 1-based; indices are 0-based offsets
    into the original string.
    """

    def __init__(self, string):
        """
        Init method.

        Arguments:
            string: The string to work with.
        """
        self._string = string
        # Sorted (ascending) list of the index at which each line starts.
        self._line_start_indexes = self._process_line_breaks(string)
        # this is an exclusive index used in the case that the template doesn't
        # end with a new line
        self.eof_index = len(string)

    def _process_line_breaks(self, string):
        """
        Creates a list where each entry is the index into the string at which
        a line begins; the first line always begins at index 0.

        Arguments:
            string: The string in which to find line breaks.

        Returns:
            A sorted list of indices into the string at which each line
            begins.
        """
        line_start_indexes = [0]
        index = 0
        while True:
            index = string.find('\n', index)
            if index < 0:
                break
            # The next line starts immediately after the newline character.
            index += 1
            line_start_indexes.append(index)
        return line_start_indexes

    def get_string(self):
        """
        Get the original string.
        """
        return self._string

    def index_to_line_number(self, index):
        """
        Given an index, determines the line of the index.

        Arguments:
            index: The index into the original string for which we want to know
                the line number

        Returns:
            The line number (1-based) of the provided index.
        """
        # The count of line-start indexes <= index is exactly the 1-based line
        # number. bisect gives it in O(log n) instead of the previous linear
        # scan over every line.
        return bisect.bisect_right(self._line_start_indexes, index)

    def index_to_column_number(self, index):
        """
        Gets the column (i.e. index into the line) for the given index into the
        original string.

        Arguments:
            index: The index into the original string.

        Returns:
            The column (1-based index into the line) for the given index into
            the original string.
        """
        start_index = self.index_to_line_start_index(index)
        column = index - start_index + 1
        return column

    def index_to_line_start_index(self, index):
        """
        Gets the index of the start of the line of the given index.

        Arguments:
            index: The index into the original string.

        Returns:
            The index of the start of the line of the given index.
        """
        line_number = self.index_to_line_number(index)
        return self.line_number_to_start_index(line_number)

    def index_to_line_end_index(self, index):
        """
        Gets the index of the end of the line of the given index.

        Arguments:
            index: The index into the original string.

        Returns:
            The (exclusive) index of the end of the line of the given index.
        """
        line_number = self.index_to_line_number(index)
        return self.line_number_to_end_index(line_number)

    def line_number_to_start_index(self, line_number):
        """
        Gets the starting index for the provided line number.

        Arguments:
            line_number: The line number of the line for which we want to find
                the start index.

        Returns:
            The starting index for the provided line number.
        """
        return self._line_start_indexes[line_number - 1]

    def line_number_to_end_index(self, line_number):
        """
        Gets the ending index for the provided line number.

        Arguments:
            line_number: The line number of the line for which we want to find
                the end index.

        Returns:
            The (exclusive) ending index for the provided line number.
        """
        if line_number < len(self._line_start_indexes):
            return self._line_start_indexes[line_number]
        else:
            # an exclusive index in the case that the file didn't end with a
            # newline.
            return self.eof_index

    def line_number_to_line(self, line_number):
        """
        Gets the line of text designated by the provided line number, without
        its trailing newline.

        Arguments:
            line_number: The line number of the line we want to find.

        Returns:
            The line of text designated by the provided line number.
        """
        start_index = self._line_start_indexes[line_number - 1]
        if len(self._line_start_indexes) == line_number:
            # Last line: may not end with a newline, so take the rest.
            line = self._string[start_index:]
        else:
            # Drop the trailing '\n' (hence the - 1).
            end_index = self._line_start_indexes[line_number]
            line = self._string[start_index:end_index - 1]
        return line

    def line_count(self):
        """
        Gets the number of lines in the string.
        """
        return len(self._line_start_indexes)
class Rules(Enum):
    """
    An Enum of each rule which the linter will check.

    Each member's value is the rule-id string used in reports and in
    "xss-lint: disable=<rule-id>" pragmas.
    """
    # IMPORTANT: Do not edit without also updating the docs:
    # - http://edx.readthedocs.org/projects/edx-developer-guide/en/latest/conventions/preventing_xss.html#xss-linter

    # Rules for Mako templates.
    mako_missing_default = 'mako-missing-default'
    mako_multiple_page_tags = 'mako-multiple-page-tags'
    mako_unparseable_expression = 'mako-unparseable-expression'
    mako_unwanted_html_filter = 'mako-unwanted-html-filter'
    mako_invalid_html_filter = 'mako-invalid-html-filter'
    mako_invalid_js_filter = 'mako-invalid-js-filter'
    mako_js_missing_quotes = 'mako-js-missing-quotes'
    mako_js_html_string = 'mako-js-html-string'
    mako_html_entities = 'mako-html-entities'
    mako_unknown_context = 'mako-unknown-context'
    # Rule for Underscore.js templates.
    underscore_not_escaped = 'underscore-not-escaped'
    # Rules for JavaScript (and CoffeeScript) files.
    javascript_jquery_append = 'javascript-jquery-append'
    javascript_jquery_prepend = 'javascript-jquery-prepend'
    javascript_jquery_insertion = 'javascript-jquery-insertion'
    javascript_jquery_insert_into_target = 'javascript-jquery-insert-into-target'
    javascript_jquery_html = 'javascript-jquery-html'
    javascript_concat_html = 'javascript-concat-html'
    javascript_escape = 'javascript-escape'
    javascript_interpolate = 'javascript-interpolate'
    # Rules for Python source files.
    python_concat_html = 'python-concat-html'
    python_custom_escape = 'python-custom-escape'
    python_deprecated_display_name = 'python-deprecated-display-name'
    python_requires_html_or_text = 'python-requires-html-or-text'
    python_close_before_format = 'python-close-before-format'
    python_wrap_html = 'python-wrap-html'
    python_interpolate_html = 'python-interpolate-html'
    python_parse_error = 'python-parse-error'

    def __init__(self, rule_id):
        """
        Init method.

        Arguments:
            rule_id: The rule-id string (this is also the Enum member's
                value).
        """
        self.rule_id = rule_id
class Expression(object):
    """
    Represents an arbitrary expression.

    An expression can be any type of code snippet. It will sometimes have a
    starting and ending delimiter, but not always.

    Here are some example expressions::

        ${x | n, decode.utf8}
        <%= x %>
        function(x)
        "<p>" + message + "</p>"

    Other details of note:
    - Only a start_index is required for a valid expression.
    - If end_index is None, it means we couldn't parse the rest of the
      expression.
    - All other details of the expression are optional, and are only added if
      and when supplied and needed for additional checks. They are not
      necessary for the final results output.
    """

    def __init__(self, start_index, end_index=None, template=None, start_delim="", end_delim="", strings=None):
        """
        Init method.

        Arguments:
            start_index: the starting index of the expression
            end_index: the index immediately following the expression, or None
                if the expression was unparseable
            template: optional template code in which the expression was found
            start_delim: optional starting delimiter of the expression
            end_delim: optional ending delimeter of the expression
            strings: optional list of ParseStrings
        """
        self.start_index = start_index
        self.end_index = end_index
        self.start_delim = start_delim
        self.end_delim = end_delim
        self.strings = strings
        if template is not None and self.end_index is not None:
            self.expression = template[start_index:end_index]
            # Strip the delimiters to get the inner expression. The end of the
            # slice is computed explicitly: the previous form used
            # -len(end_delim), which evaluates to -0 (i.e. 0) for an empty
            # end_delim and wrongly produced an empty inner expression.
            inner_end_index = len(self.expression) - len(end_delim)
            self.expression_inner = self.expression[len(start_delim):inner_end_index].strip()
        else:
            self.expression = None
            self.expression_inner = None
class RuleViolation(object):
    """
    Base class representing a rule violation which can be used for reporting.
    """

    def __init__(self, rule):
        """
        Init method.

        Arguments:
            rule: The Rule which was violated.
        """
        self.rule = rule
        self.full_path = ''
        self.is_disabled = False

    def _mark_disabled(self, string, scope_start_string=False):
        """
        Performs the disable pragma search and marks the rule as disabled if a
        matching pragma is found.

        Pragma format::

            xss-lint: disable=violation-name,other-violation-name

        Arguments:
            string: The string of code in which to search for the pragma.
            scope_start_string: True if the pragma must be at the start of the
                string, False otherwise. The pragma is considered at the start
                of the string if it has a maximum of 5 non-whitespace
                characters preceding it.

        Side Effect:
            Sets self.is_disabled as appropriate based on whether the pragma
            is found.
        """
        pragma_match = re.search(r'xss-lint:\s*disable=([a-zA-Z,-]+)', string)
        if pragma_match is None:
            return
        if scope_start_string:
            # More than 5 non-space characters before the pragma means it is
            # not considered to be "at the start" of the string.
            leading_text = string[:pragma_match.start()]
            if len(leading_text) - leading_text.count(' ') > 5:
                return
        disabled_rule_ids = pragma_match.group(1).split(',')
        if self.rule.rule_id in disabled_rule_ids:
            self.is_disabled = True

    def sort_key(self):
        """
        Returns a key that can be sorted on
        """
        return (0, 0, self.rule.rule_id)

    def first_line(self):
        """
        Since a file level rule has no first line, returns empty string.
        """
        return ''

    def prepare_results(self, full_path, string_lines):
        """
        Preps this instance for results reporting.

        Arguments:
            full_path: Path of the file in violation.
            string_lines: A StringLines containing the contents of the file in
                violation.
        """
        self.full_path = full_path
        self._mark_disabled(string_lines.get_string())

    def print_results(self, _options, out):
        """
        Prints the results represented by this rule violation.

        Arguments:
            _options: ignored
            out: output file
        """
        print("{}: {}".format(self.full_path, self.rule.rule_id), file=out)
class ExpressionRuleViolation(RuleViolation):
    """
    A class representing a particular rule violation for expressions which
    contain more specific details of the location of the violation for reporting
    purposes.
    """

    def __init__(self, rule, expression):
        """
        Init method.

        Arguments:
            rule: The Rule which was violated.
            expression: The Expression that was in violation.
        """
        super(ExpressionRuleViolation, self).__init__(rule)
        self.expression = expression
        # Location details default to zero/empty and are filled in later by
        # prepare_results().
        self.start_line = 0
        self.start_column = 0
        self.end_line = 0
        self.end_column = 0
        self.lines = []
        self.is_disabled = False

    def _mark_expression_disabled(self, string_lines):
        """
        Marks the expression violation as disabled if it finds the disable
        pragma anywhere on the first line of the violation, or at the start of
        the line preceding the violation.

        Pragma format::

            xss-lint: disable=violation-name,other-violation-name

        Examples::

            <% // xss-lint: disable=underscore-not-escaped %>
            <%= gettext('Single Line') %>

            <%= gettext('Single Line') %><% // xss-lint: disable=underscore-not-escaped %>

        Arguments:
            string_lines: A StringLines containing the contents of the file in
                violation.

        Side Effect:
            Sets self.is_disabled as appropriate based on whether the pragma is
            found.
        """
        # disable pragma can be at the start of the preceding line
        has_previous_line = self.start_line > 1
        if has_previous_line:
            line_to_check = string_lines.line_number_to_line(self.start_line - 1)
            self._mark_disabled(line_to_check, scope_start_string=True)
            if self.is_disabled:
                return
        # TODO: this should work at end of any line of the violation
        # disable pragma can be anywhere on the first line of the violation
        line_to_check = string_lines.line_number_to_line(self.start_line)
        self._mark_disabled(line_to_check, scope_start_string=False)

    def sort_key(self):
        """
        Returns a key that can be sorted on
        """
        return (self.start_line, self.start_column, self.rule.rule_id)

    def first_line(self):
        """
        Returns the initial line of code of the violation.
        """
        return self.lines[0]

    def prepare_results(self, full_path, string_lines):
        """
        Preps this instance for results reporting.

        Arguments:
            full_path: Path of the file in violation.
            string_lines: A StringLines containing the contents of the file in
                violation.
        """
        self.full_path = full_path
        start_index = self.expression.start_index
        self.start_line = string_lines.index_to_line_number(start_index)
        self.start_column = string_lines.index_to_column_number(start_index)
        end_index = self.expression.end_index
        if end_index is not None:
            self.end_line = string_lines.index_to_line_number(end_index)
            self.end_column = string_lines.index_to_column_number(end_index)
        else:
            # The expression could not be fully parsed; report it as ending on
            # its starting line with an unknown ('?') end column.
            self.end_line = self.start_line
            self.end_column = '?'
        # Cache every source line the violation spans for later printing.
        for line_number in range(self.start_line, self.end_line + 1):
            self.lines.append(string_lines.line_number_to_line(line_number))
        self._mark_expression_disabled(string_lines)

    def print_results(self, options, out):
        """
        Prints the results represented by this rule violation.

        Arguments:
            options: A list of the following options:
                list_files: True to print only file names, and False to print
                    all violations.
                verbose: True for multiple lines of context, False single line.
            out: output file
        """
        # Verbose mode prints every spanned line; otherwise only the first.
        if options['verbose']:
            end_line = self.end_line + 1
        else:
            end_line = self.start_line + 1
        for line_number in range(self.start_line, end_line):
            if line_number == self.start_line:
                column = self.start_column
                rule_id = self.rule.rule_id + ":"
            else:
                # Continuation lines carry no rule id; pad with spaces so the
                # source text aligns under the first line's output.
                column = 1
                rule_id = " " * (len(self.rule.rule_id) + 1)
            # NOTE(review): .encode() returns bytes on Python 3, which would
            # render as a b'...' literal in the printed output; this looks
            # written for Python 2 -- confirm the target interpreter.
            line = self.lines[line_number - self.start_line].encode(encoding='utf-8')
            print("{}: {}:{}: {} {}".format(
                self.full_path,
                line_number,
                column,
                rule_id,
                line
            ), file=out)
class SummaryResults(object):
    """
    Contains the summary results for all violations.
    """

    def __init__(self):
        """
        Init method.
        """
        self.total_violations = 0
        # One running total per known rule id, all starting at zero.
        self.totals_by_rule = {rule.rule_id: 0 for rule in Rules.__members__.values()}

    def add_violation(self, violation):
        """
        Adds a violation to the summary details.

        Arguments:
            violation: The violation to add to the summary.
        """
        rule_id = violation.rule.rule_id
        self.total_violations += 1
        self.totals_by_rule[rule_id] += 1

    def print_results(self, options, out):
        """
        Prints the results (i.e. violations) in this file.

        Arguments:
            options: A list of the following options:
                list_files: True to print only file names, and False to print
                    all violations.
                rule_totals: If True include totals by rule.
            out: output file
        """
        # Summary output is suppressed entirely in list-files mode.
        if options['list_files'] is not False:
            return
        if options['rule_totals']:
            widest_rule_id = max(len(rule_id) for rule_id in self.totals_by_rule)
            print("", file=out)
            for rule_id in sorted(self.totals_by_rule.keys()):
                padding = " " * (widest_rule_id - len(rule_id))
                print("{}: {}{} violations".format(rule_id, padding, self.totals_by_rule[rule_id]), file=out)
            print("", file=out)
        # matches output of eslint for simplicity
        print("", file=out)
        print("{} violations total".format(self.total_violations), file=out)
class FileResults(object):
    """
    Contains the results, or violations, for a file.
    """

    def __init__(self, full_path):
        """
        Init method.

        Arguments:
            full_path: The full path for this file.
        """
        self.full_path = full_path
        self.directory = os.path.dirname(full_path)
        self.is_file = os.path.isfile(full_path)
        self.violations = []

    def prepare_results(self, file_string, line_comment_delim=None):
        """
        Prepares the results for output for this file.

        Arguments:
            file_string: The string of content for this file.
            line_comment_delim: A string representing the start of a line
                comment. For example "##" for Mako and "//" for JavaScript.
        """
        string_lines = StringLines(file_string)
        for violation in self.violations:
            violation.prepare_results(self.full_path, string_lines)
        if line_comment_delim is not None:
            self._filter_commented_code(line_comment_delim)

    def print_results(self, options, summary_results, out):
        """
        Prints the results (i.e. violations) in this file.

        Arguments:
            options: A list of the following options:
                list_files: True to print only file names, and False to print
                    all violations.
            summary_results: A SummaryResults with a summary of the violations.
                verbose: True for multiple lines of context, False single line.
            out: output file

        Side effect:
            Updates the passed SummaryResults.
        """
        if options['list_files']:
            # Only the file name is wanted, and only when it has violations.
            if self.violations:
                print(self.full_path, file=out)
            return
        self.violations.sort(key=lambda violation: violation.sort_key())
        for violation in self.violations:
            if violation.is_disabled:
                continue
            violation.print_results(options, out)
            summary_results.add_violation(violation)

    def _filter_commented_code(self, line_comment_delim):
        """
        Remove any violations that were found in commented out code.

        Arguments:
            line_comment_delim: A string representing the start of a line
                comment. For example "##" for Mako and "//" for JavaScript.
        """
        self.violations = [
            violation for violation in self.violations
            if not self._is_commented(violation, line_comment_delim)
        ]

    def _is_commented(self, violation, line_comment_delim):
        """
        Checks if violation line is commented out.

        Arguments:
            violation: The violation to check
            line_comment_delim: A string representing the start of a line
                comment. For example "##" for Mako and "//" for JavaScript.

        Returns:
            True if the first line of the violation is actually commented out,
            False otherwise.
        """
        if 'parse' in violation.rule.rule_id:
            # For parse rules, don't filter them because the comment could be a
            # part of the parse issue to begin with.
            return False
        return violation.first_line().lstrip().startswith(line_comment_delim)
class ParseString(object):
    """
    ParseString is the result of parsing a string out of a template.

    A ParseString has the following attributes:
        start_index: The index of the first quote, or None if none found
        end_index: The index following the closing quote, or None if
            unparseable
        quote_length: The length of the quote. Could be 3 for a Python
            triple quote. Or None if none found.
        string: the text of the parsed string, or None if none found.
        string_inner: the text inside the quotes of the parsed string, or None
            if none found.
    """

    def __init__(self, template, start_index, end_index):
        """
        Init method.

        Arguments:
            template: The template to be searched.
            start_index: The start index to search.
            end_index: The end index to search before.
        """
        self.end_index = None
        self.quote_length = None
        self.string = None
        self.string_inner = None
        self.start_index = self._find_string_start(template, start_index, end_index)
        if self.start_index is None:
            return
        parsed = self._parse_string(template, self.start_index)
        if parsed is None:
            return
        self.end_index = parsed['end_index']
        self.quote_length = parsed['quote_length']
        self.string = parsed['string']
        self.string_inner = parsed['string_inner']

    def _find_string_start(self, template, start_index, end_index):
        """
        Finds the index of the start of a string: the first single or double
        quote within the search window.

        Arguments:
            template: The template to be searched.
            start_index: The start index to search.
            end_index: The end index to search before.

        Returns:
            The start index of the first single or double quote, or None if no
            quote was found.
        """
        match = re.compile(r"""['"]""").search(template, start_index, end_index)
        return None if match is None else match.start()

    def _parse_string(self, template, start_index):
        """
        Finds the indices of a string inside a template.

        Arguments:
            template: The template to be searched.
            start_index: The start index of the open quote.

        Returns:
            A dict containing the following, or None if not parseable:
                end_index: The index following the closing quote
                quote_length: The length of the quote. Could be 3 for a Python
                    triple quote.
                string: the text of the parsed string
                string_inner: the text inside the quotes of the parsed string
        """
        quote = template[start_index]
        if quote not in ("'", '"'):
            raise ValueError("start_index must refer to a single or double quote.")
        # A Python-style triple quote uses the tripled character as delimiter.
        if template.startswith(quote * 3, start_index):
            quote = quote * 3
        search_from = start_index + len(quote)
        while True:
            closing_index = template.find(quote, search_from)
            if closing_index < 0:
                # No closing quote found: unparseable.
                return None
            escape_index = template.find("\\", search_from)
            if 0 <= escape_index < closing_index:
                # Skip the backslash and the escaped character, then keep
                # looking for the real closing quote.
                search_from = escape_index + 2
                continue
            end_index = closing_index + len(quote)
            raw_string = template[start_index:end_index]
            return {
                'end_index': end_index,
                'quote_length': len(quote),
                'string': raw_string,
                'string_inner': raw_string[len(quote):-len(quote)],
            }
class BaseLinter(object):
    """
    BaseLinter provides some helper functions that are used by multiple linters.
    """

    # Subclasses override this with their language's line-comment prefix (e.g.
    # "//" for JavaScript); None means line comments are not recognized.
    LINE_COMMENT_DELIM = None

    def _is_valid_directory(self, skip_dirs, directory):
        """
        Determines if the provided directory is a directory that could contain
        a file that needs to be linted.

        Arguments:
            skip_dirs: The directories to be skipped.
            directory: The directory to be linted.

        Returns:
            True if this directory should be linted for violations and False
            otherwise.
        """
        if is_skip_dir(skip_dirs, directory):
            return False
        return True

    def _load_file(self, file_full_path):
        """
        Loads a file into a string.

        Arguments:
            file_full_path: The full path of the file to be loaded.

        Returns:
            A string containing the files contents.
        """
        with open(file_full_path, 'r') as input_file:
            file_contents = input_file.read()
            # NOTE(review): str.decode() only exists on Python 2 byte strings;
            # under Python 3 a text-mode read returns str and this raises
            # AttributeError -- confirm the intended interpreter version.
            return file_contents.decode(encoding='utf-8')

    def _load_and_check_file_is_safe(self, file_full_path, lint_function, results):
        """
        Loads the Python file and checks if it is in violation.

        Arguments:
            file_full_path: The file to be loaded and linted.
            lint_function: A function that will lint for violations. It must
                take two arguments:
                1) string contents of the file
                2) results object
            results: A FileResults to be used for this file

        Returns:
            The file results containing any violations.
        """
        file_contents = self._load_file(file_full_path)
        lint_function(file_contents, results)
        return results

    def _find_closing_char_index(
            self, start_delim, open_char, close_char, template, start_index, num_open_chars=0, strings=None
    ):
        """
        Finds the index of the closing char that matches the opening char.

        For example, this could be used to find the end of a Mako expression,
        where the open and close characters would be '{' and '}'.

        Arguments:
            start_delim: If provided (e.g. '${' for Mako expressions), the
                closing character must be found before the next start_delim.
            open_char: The opening character to be matched (e.g '{')
            close_char: The closing character to be matched (e.g '}')
            template: The template to be searched.
            start_index: The start index of the last open char.
            num_open_chars: The current number of open chars.
            strings: A list of ParseStrings already parsed

        Returns:
            A dict containing the following, or None if unparseable:
                close_char_index: The index of the closing character
                strings: a list of ParseStrings
        """
        strings = [] if strings is None else strings
        # Find start index of an uncommented line.
        start_index = self._uncommented_start_index(template, start_index)
        # loop until we found something useful on an uncommented out line
        while start_index is not None:
            close_char_index = template.find(close_char, start_index)
            if close_char_index < 0:
                # If we can't find a close char, let's just quit.
                return None
            open_char_index = template.find(open_char, start_index, close_char_index)
            parse_string = ParseString(template, start_index, close_char_index)
            # The earliest of: the close char, a nested open char, and the
            # start of a string literal determines which case is handled below.
            valid_index_list = [close_char_index]
            if 0 <= open_char_index:
                valid_index_list.append(open_char_index)
            if parse_string.start_index is not None:
                valid_index_list.append(parse_string.start_index)
            min_valid_index = min(valid_index_list)
            start_index = self._uncommented_start_index(template, min_valid_index)
            if start_index == min_valid_index:
                break
        if start_index is None:
            # No uncommented code to search.
            return None
        if parse_string.start_index == min_valid_index:
            # A string literal comes first: record it and resume the search
            # after it, since quotes may conceal open/close chars.
            strings.append(parse_string)
            if parse_string.end_index is None:
                return None
            else:
                return self._find_closing_char_index(
                    start_delim, open_char, close_char, template, start_index=parse_string.end_index,
                    num_open_chars=num_open_chars, strings=strings
                )
        if open_char_index == min_valid_index:
            # A nested open char comes first: recurse with one more level of
            # nesting to balance.
            if start_delim is not None:
                # if we find another starting delim, consider this unparseable
                start_delim_index = template.find(start_delim, start_index, close_char_index)
                if 0 <= start_delim_index < open_char_index:
                    return None
            return self._find_closing_char_index(
                start_delim, open_char, close_char, template, start_index=open_char_index + 1,
                num_open_chars=num_open_chars + 1, strings=strings
            )
        if num_open_chars == 0:
            # The close char comes first and all nesting is balanced: found it.
            return {
                'close_char_index': close_char_index,
                'strings': strings,
            }
        else:
            # The close char balances a nested open char; keep searching past
            # it with one less open char outstanding.
            return self._find_closing_char_index(
                start_delim, open_char, close_char, template, start_index=close_char_index + 1,
                num_open_chars=num_open_chars - 1, strings=strings
            )

    def _uncommented_start_index(self, template, start_index):
        """
        Finds the first start_index that is on an uncommented line.

        Arguments:
            template: The template to be searched.
            start_index: The start index of the last open char.

        Returns:
            If start_index is on an uncommented out line, returns start_index.
            Otherwise, returns the start_index of the first line that is
            uncommented, if there is one. Otherwise, returns None.
        """
        if self.LINE_COMMENT_DELIM is not None:
            line_start_index = StringLines(template).index_to_line_start_index(start_index)
            # NOTE(review): non-raw string; "\s" happens to pass through
            # unchanged but a raw string would be cleaner here.
            uncommented_line_start_index_regex = re.compile("^(?!\s*{})".format(self.LINE_COMMENT_DELIM), re.MULTILINE)
            # Finds the line start index of the first uncommented line, including the current line.
            match = uncommented_line_start_index_regex.search(template, line_start_index)
            if match is None:
                # No uncommented lines.
                return None
            elif match.start() < start_index:
                # Current line is uncommented, so return original start_index.
                return start_index
            else:
                # Return start of first uncommented line.
                return match.start()
        else:
            # No line comment delimeter, so this acts as a no-op.
            return start_index
class UnderscoreTemplateLinter(BaseLinter):
    """
    The linter for Underscore.js template files.
    """

    def __init__(self):
        """
        Init method.
        """
        super(UnderscoreTemplateLinter, self).__init__()
        self._skip_underscore_dirs = SKIP_DIRS + ('test',)

    def process_file(self, directory, file_name):
        """
        Process file to determine if it is an Underscore template file and
        if it is safe.

        Arguments:
            directory (string): The directory of the file to be checked
            file_name (string): A filename for a potential underscore file

        Returns:
            The file results containing any violations.
        """
        full_path = os.path.normpath(directory + '/' + file_name)
        results = FileResults(full_path)
        should_skip = (
            not self._is_valid_directory(self._skip_underscore_dirs, directory) or
            not file_name.lower().endswith('.underscore')
        )
        if should_skip:
            return results
        return self._load_and_check_file_is_safe(full_path, self.check_underscore_file_is_safe, results)

    def check_underscore_file_is_safe(self, underscore_template, results):
        """
        Checks for violations in an Underscore.js template.

        Arguments:
            underscore_template: The contents of the Underscore.js template.
            results: A file results objects to which violations will be added.
        """
        self._check_underscore_expressions(underscore_template, results)
        results.prepare_results(underscore_template)

    def _check_underscore_expressions(self, underscore_template, results):
        """
        Searches for Underscore.js expressions that contain violations.

        Arguments:
            underscore_template: The contents of the Underscore.js template.
            results: A list of results into which violations will be added.
        """
        for expression in self._find_unescaped_expressions(underscore_template):
            if self._is_safe_unescaped_expression(expression):
                continue
            results.violations.append(ExpressionRuleViolation(
                Rules.underscore_not_escaped, expression
            ))

    def _is_safe_unescaped_expression(self, expression):
        """
        Determines whether an expression is safely escaped, even though it is
        using the expression syntax that doesn't itself escape (i.e. <%= ).

        In some cases it is ok to not use the Underscore.js template escape
        (i.e. <%- ) because the escaping is happening inside the expression.

        Safe examples::

            <%= HtmlUtils.ensureHtml(message) %>
            <%= _.escape(message) %>

        Arguments:
            expression: The Expression being checked.

        Returns:
            True if the Expression has been safely escaped, and False
            otherwise.
        """
        return expression.expression_inner.startswith(('HtmlUtils.', '_.escape('))

    def _find_unescaped_expressions(self, underscore_template):
        """
        Returns a list of unsafe expressions.

        At this time all expressions that are unescaped are considered unsafe.

        Arguments:
            underscore_template: The contents of the Underscore.js template.

        Returns:
            A list of Expressions.
        """
        unescaped_expression_regex = re.compile("<%=.*?%>", re.DOTALL)
        return [
            Expression(
                match.start(), match.end(), template=underscore_template,
                start_delim="<%=", end_delim="%>"
            )
            for match in unescaped_expression_regex.finditer(underscore_template)
        ]
class JavaScriptLinter(BaseLinter):
"""
The linter for JavaScript and CoffeeScript files.
"""
LINE_COMMENT_DELIM = "//"
    def __init__(self):
        """
        Init method.
        """
        super(JavaScriptLinter, self).__init__()
        # Skip-lists differ per file type: 'i18n' and 'static/coffee' are only
        # skipped for plain JavaScript (presumably generated/compiled output
        # -- confirm).
        self._skip_javascript_dirs = SKIP_DIRS + ('i18n', 'static/coffee')
        self._skip_coffeescript_dirs = SKIP_DIRS
        # Underscore templates can be embedded in JavaScript files, so this
        # linter reuses the Underscore checks on every file it processes.
        self.underscore_linter = UnderscoreTemplateLinter()
def process_file(self, directory, file_name):
"""
Process file to determine if it is a JavaScript file and
if it is safe.
Arguments:
directory (string): The directory of the file to be checked
file_name (string): A filename for a potential JavaScript file
Returns:
The file results containing any violations.
"""
file_full_path = os.path.normpath(directory + '/' + file_name)
results = FileResults(file_full_path)
if not results.is_file:
return results
if file_name.lower().endswith('.js') and not file_name.lower().endswith('.min.js'):
skip_dirs = self._skip_javascript_dirs
elif file_name.lower().endswith('.coffee'):
skip_dirs = self._skip_coffeescript_dirs
else:
return results
if not self._is_valid_directory(skip_dirs, directory):
return results
return self._load_and_check_file_is_safe(file_full_path, self.check_javascript_file_is_safe, results)
    def check_javascript_file_is_safe(self, file_contents, results):
        """
        Checks for violations in a JavaScript file.

        Runs the jQuery DOM-insertion checks, the interpolate()/escape()
        checks, the HTML string-concatenation check, and the Underscore
        template checks, then prepares the results for reporting.

        Arguments:
            file_contents: The contents of the JavaScript file.
            results: A file results objects to which violations will be added.
        """
        # Named sentinels to make the positional caller-check/argument-check
        # parameters of _check_jquery_function readable below.
        no_caller_check = None
        no_argument_check = None
        # jQuery functions whose *argument* could insert unsafe HTML.
        self._check_jquery_function(
            file_contents, "append", Rules.javascript_jquery_append, no_caller_check,
            self._is_jquery_argument_safe, results
        )
        self._check_jquery_function(
            file_contents, "prepend", Rules.javascript_jquery_prepend, no_caller_check,
            self._is_jquery_argument_safe, results
        )
        self._check_jquery_function(
            file_contents, "unwrap|wrap|wrapAll|wrapInner|after|before|replaceAll|replaceWith",
            Rules.javascript_jquery_insertion, no_caller_check, self._is_jquery_argument_safe, results
        )
        # jQuery functions where the *caller* (not the argument) is inserted
        # into the target element, so the caller must be checked instead.
        self._check_jquery_function(
            file_contents, "appendTo|prependTo|insertAfter|insertBefore",
            Rules.javascript_jquery_insert_into_target, self._is_jquery_insert_caller_safe, no_argument_check, results
        )
        self._check_jquery_function(
            file_contents, "html", Rules.javascript_jquery_html, no_caller_check,
            self._is_jquery_html_argument_safe, results
        )
        self._check_javascript_interpolate(file_contents, results)
        self._check_javascript_escape(file_contents, results)
        self._check_concat_html(file_contents, Rules.javascript_concat_html, results)
        # Underscore templates may be embedded in JavaScript, so run those
        # checks on the same contents.
        self.underscore_linter.check_underscore_file_is_safe(file_contents, results)
        results.prepare_results(file_contents, line_comment_delim=self.LINE_COMMENT_DELIM)
    def _get_expression_for_function(self, file_contents, function_start_match):
        """
        Returns an expression that matches the function call opened with
        function_start_match.

        Arguments:
            file_contents: The contents of the JavaScript file.
            function_start_match: A regex match representing the start of the function
                call (e.g. ".escape(").

        Returns:
            An Expression that best matches the function.
        """
        start_index = function_start_match.start()
        inner_start_index = function_start_match.end()
        # Find the ")" that balances the "(" which ended the matched prefix.
        result = self._find_closing_char_index(
            None, "(", ")", file_contents, start_index=inner_start_index
        )
        if result is not None:
            end_index = result['close_char_index'] + 1
            expression = Expression(
                start_index, end_index, template=file_contents, start_delim=function_start_match.group(), end_delim=")"
            )
        else:
            # Couldn't parse to the closing paren; record only the start so
            # the violation can still be reported (with unknown end).
            expression = Expression(start_index)
        return expression
    def _check_javascript_interpolate(self, file_contents, results):
        """
        Checks that interpolate() calls are safe.

        Only use of StringUtils.interpolate() or HtmlUtils.interpolateText()
        are safe.

        Arguments:
            file_contents: The contents of the JavaScript file.
            results: A file results objects to which violations will be added.
        """
        # Ignores calls starting with "StringUtils.", because those are safe
        # NOTE(review): the "." before "interpolate" is an unescaped regex
        # wildcard, so any single character (not just a literal dot) matches
        # ahead of the call name -- presumably intentional so bare calls are
        # caught too; confirm.
        regex = re.compile(r"(?<!StringUtils).interpolate\(")
        for function_match in regex.finditer(file_contents):
            expression = self._get_expression_for_function(file_contents, function_match)
            results.violations.append(ExpressionRuleViolation(Rules.javascript_interpolate, expression))
def _check_javascript_escape(self, file_contents, results):
"""
Checks that only necessary escape() are used.
Allows for _.escape(), although this shouldn't be the recommendation.
Arguments:
file_contents: The contents of the JavaScript file.
results: A file results objects to which violations will be added.
"""
# Ignores calls starting with "_.", because those are safe
regex = regex = re.compile(r"(?<!_).escape\(")
for function_match in regex.finditer(file_contents):
expression = self._get_expression_for_function(file_contents, function_match)
results.violations.append(ExpressionRuleViolation(Rules.javascript_escape, expression))
    def _check_jquery_function(self, file_contents, function_names, rule, is_caller_safe, is_argument_safe, results):
        """
        Checks that the JQuery function_names (e.g. append(), prepend()) calls
        are safe.

        Exactly one of is_caller_safe or is_argument_safe must be provided;
        passing both (or neither) raises ValueError.

        Arguments:
            file_contents: The contents of the JavaScript file.
            function_names: A pipe delimited list of names of the functions
                (e.g. "wrap|after|before").
            rule: The name of the rule to use for validation errors (e.g.
                Rules.javascript_jquery_append).
            is_caller_safe: A function to test if caller of the JQuery function
                is safe.
            is_argument_safe: A function to test if the argument passed to the
                JQuery function is safe.
            results: A file results objects to which violations will be added.

        Raises:
            ValueError: If both, or neither, of is_caller_safe and
                is_argument_safe are supplied.
        """
        # Ignores calls starting with "HtmlUtils.", because those are safe
        # NOTE(review): the unescaped '.' before the function name matches any
        # single character, not only a literal dot — confirm this is intended.
        regex = re.compile(r"(?<!HtmlUtils).(?:{})\(".format(function_names))
        for function_match in regex.finditer(file_contents):
            # Unless proven safe below, a match is a violation. In particular,
            # a call whose closing paren cannot be found stays a violation.
            is_violation = True
            expression = self._get_expression_for_function(file_contents, function_match)
            if expression.end_index is not None:
                start_index = expression.start_index
                inner_start_index = function_match.end()
                close_paren_index = expression.end_index - 1
                # The text between the parentheses of the call.
                function_argument = file_contents[inner_start_index:close_paren_index].strip()
                if is_argument_safe is not None and is_caller_safe is None:
                    is_violation = is_argument_safe(function_argument) is False
                elif is_caller_safe is not None and is_argument_safe is None:
                    # Test the text on the same line leading up to the call.
                    line_start_index = StringLines(file_contents).index_to_line_start_index(start_index)
                    caller_line_start = file_contents[line_start_index:start_index]
                    is_violation = is_caller_safe(caller_line_start) is False
                else:
                    raise ValueError("Must supply either is_argument_safe, or is_caller_safe, but not both.")
            if is_violation:
                results.violations.append(ExpressionRuleViolation(rule, expression))
def _is_jquery_argument_safe_html_utils_call(self, argument):
"""
Checks that the argument sent to a jQuery DOM insertion function is a
safe call to HtmlUtils.
A safe argument is of the form:
- HtmlUtils.xxx(anything).toString()
- edx.HtmlUtils.xxx(anything).toString()
Arguments:
argument: The argument sent to the jQuery function (e.g.
append(argument)).
Returns:
True if the argument is safe, and False otherwise.
"""
# match on HtmlUtils.xxx().toString() or edx.HtmlUtils
match = re.search(r"(?:edx\.)?HtmlUtils\.[a-zA-Z0-9]+\(.*\)\.toString\(\)", argument)
return match is not None and match.group() == argument
    def _is_jquery_argument_safe(self, argument):
        """
        Check the argument sent to a jQuery DOM insertion function (e.g.
        append()) to check if it is safe.

        Safe arguments include:
        - the argument can end with ".el", ".$el" (with no concatenation)
        - the argument can be a single variable ending in "El" or starting with
          "$". For example, "testEl" or "$test".
        - the argument can be a single string literal with no HTML tags
        - the argument can be a call to $() with the first argument a string
          literal with a single HTML tag. For example, ".append($('<br/>'))"
          or '.append($("<br/>"))'.
        - the argument can be a call to HtmlUtils.xxx(html).toString()

        Arguments:
            argument: The argument sent to the jQuery function (e.g.
                append(argument)).

        Returns:
            True if the argument is safe, and False otherwise.
        """
        # A lone identifier is safe only when it follows the "El"/"$" naming
        # convention; any other lone identifier falls through to False (the
        # elif branches below only apply to non-identifier arguments).
        match_variable_name = re.search("[_$a-zA-Z]+[_$a-zA-Z0-9]*", argument)
        if match_variable_name is not None and match_variable_name.group() == argument:
            if argument.endswith('El') or argument.startswith('$'):
                return True
        elif argument.startswith('"') or argument.startswith("'"):
            # a single literal string with no HTML is ok
            # 1. it gets rid of false negatives for non-jquery calls (e.g. graph.append("g"))
            # 2. JQuery will treat this as a plain text string and will escape any & if needed.
            string = ParseString(argument, 0, len(argument))
            if string.string == argument and "<" not in argument:
                return True
        elif argument.startswith('$('):
            # match on JQuery calls with single string and single HTML tag
            # Examples:
            #   $("<span>")
            #   $("<div/>")
            #   $("<div/>", {...})
            match = re.search(r"""\$\(\s*['"]<[a-zA-Z0-9]+\s*[/]?>['"]\s*[,)]""", argument)
            if match is not None:
                return True
        elif self._is_jquery_argument_safe_html_utils_call(argument):
            return True
        # check rules that shouldn't use concatenation
        elif "+" not in argument:
            if argument.endswith('.el') or argument.endswith('.$el'):
                return True
        return False
def _is_jquery_html_argument_safe(self, argument):
"""
Check the argument sent to the jQuery html() function to check if it is
safe.
Safe arguments to html():
- no argument (i.e. getter rather than setter)
- empty string is safe
- the argument can be a call to HtmlUtils.xxx(html).toString()
Arguments:
argument: The argument sent to html() in code (i.e. html(argument)).
Returns:
True if the argument is safe, and False otherwise.
"""
if argument == "" or argument == "''" or argument == '""':
return True
elif self._is_jquery_argument_safe_html_utils_call(argument):
return True
return False
def _is_jquery_insert_caller_safe(self, caller_line_start):
"""
Check that the caller of a jQuery DOM insertion function that takes a
target is safe (e.g. thisEl.appendTo(target)).
If original line was::
draggableObj.iconEl.appendTo(draggableObj.containerEl);
Parameter caller_line_start would be:
draggableObj.iconEl
Safe callers include:
- the caller can be ".el", ".$el"
- the caller can be a single variable ending in "El" or starting with
"$". For example, "testEl" or "$test".
Arguments:
caller_line_start: The line leading up to the jQuery function call.
Returns:
True if the caller is safe, and False otherwise.
"""
# matches end of line for caller, which can't itself be a function
caller_match = re.search(r"(?:\s*|[.])([_$a-zA-Z]+[_$a-zA-Z0-9])*$", caller_line_start)
if caller_match is None:
return False
caller = caller_match.group(1)
if caller is None:
return False
elif caller.endswith('El') or caller.startswith('$'):
return True
elif caller == 'el' or caller == 'parentNode':
return True
return False
    def _check_concat_with_html(self, file_contents, rule, results):
        """
        Checks that strings with HTML are not concatenated

        Adjacent matches on the same line are merged into one violation
        expression; a match on a new line flushes the pending violation.

        Arguments:
            file_contents: The contents of the JavaScript file.
            rule: The rule that was violated if this fails.
            results: A file results objects to which violations will be added.
        """
        lines = StringLines(file_contents)
        # The violation currently being accumulated (may still grow if more
        # matches are found on the same line).
        last_expression = None
        # Match quoted strings that starts with '<' or ends with '>'.
        regex_string_with_html = r"""
            {quote}                       # Opening quote.
            (
                \s*<                      # Starts with '<' (ignoring spaces)
                ([^{quote}]|[\\]{quote})*     # followed by anything but a closing quote.
                |                         # Or,
                ([^{quote}]|[\\]{quote})*     # Anything but a closing quote
                >\s*                      # ending with '>' (ignoring spaces)
            )
            {quote}                       # Closing quote.
        """
        # Match single or double quote.
        regex_string_with_html = "({}|{})".format(
            regex_string_with_html.format(quote="'"),
            regex_string_with_html.format(quote='"'),
        )
        # Match quoted HTML strings next to a '+'.
        regex_concat_with_html = re.compile(
            r"(\+\s*{string_with_html}|{string_with_html}\s*\+)".format(
                string_with_html=regex_string_with_html,
            ),
            re.VERBOSE
        )
        for match in regex_concat_with_html.finditer(file_contents):
            found_new_violation = False
            if last_expression is not None:
                last_line = lines.index_to_line_number(last_expression.start_index)
                # check if violation should be expanded to more of the same line
                if last_line == lines.index_to_line_number(match.start()):
                    # Same line: widen the pending expression to include this
                    # match as well.
                    last_expression = Expression(
                        last_expression.start_index, match.end(), template=file_contents
                    )
                else:
                    # Different line: record the pending violation and start a
                    # new one for this match.
                    results.violations.append(ExpressionRuleViolation(
                        rule, last_expression
                    ))
                    found_new_violation = True
            else:
                found_new_violation = True
            if found_new_violation:
                last_expression = Expression(
                    match.start(), match.end(), template=file_contents
                )
        # add final expression
        if last_expression is not None:
            results.violations.append(ExpressionRuleViolation(
                rule, last_expression
            ))
class BaseVisitor(ast.NodeVisitor):
    """
    Base class for AST NodeVisitor used for Python xss linting.

    Important: This base visitor skips all __repr__ function definitions.
    """
    def __init__(self, file_contents, results):
        """
        Init method.

        Arguments:
            file_contents: The contents of the Python file.
            results: A file results objects to which violations will be added.
        """
        super(BaseVisitor, self).__init__()
        self.file_contents = file_contents
        self.lines = StringLines(self.file_contents)
        self.results = results

    def node_to_expression(self, node):
        """
        Takes a node and translates it to an expression to be used with
        violations.

        Arguments:
            node: An AST node.

        Returns:
            An Expression covering the node's source span.
        """
        line_start_index = self.lines.line_number_to_start_index(node.lineno)
        start_index = line_start_index + node.col_offset
        if isinstance(node, ast.Str):
            # Triple quotes give col_offset of -1 on the last line of the string.
            if node.col_offset == -1:
                triple_quote_regex = re.compile("""['"]{3}""")
                end_triple_quote_match = triple_quote_regex.search(self.file_contents, line_start_index)
                open_quote_index = self.file_contents.rfind(end_triple_quote_match.group(), 0, end_triple_quote_match.start())
                # rfind returns -1 when not found; an opening quote at index 0
                # (the very start of the file) is a legitimate find, so test
                # ">= 0" rather than "> 0".
                if open_quote_index >= 0:
                    start_index = open_quote_index
                else:
                    # If we can't find a starting quote, let's assume that what
                    # we considered the end quote is really the start quote.
                    start_index = end_triple_quote_match.start()
            string = ParseString(self.file_contents, start_index, len(self.file_contents))
            return Expression(string.start_index, string.end_index)
        else:
            return Expression(start_index)

    def visit_FunctionDef(self, node):
        """
        Skips processing of __repr__ functions, since these sometimes use '<'
        for non-HTML purposes.

        Arguments:
            node: An AST node.
        """
        if node.name != '__repr__':
            self.generic_visit(node)
class HtmlStringVisitor(BaseVisitor):
    """
    Collects string nodes that appear to contain HTML. Any string containing
    '<' (or '>') is treated as potential HTML.

    To be used only with strings in context of format or concat.
    """
    def __init__(self, file_contents, results, skip_wrapped_html=False):
        """
        Init function.

        Arguments:
            file_contents: The contents of the Python file.
            results: A file results objects to which violations will be added.
            skip_wrapped_html: True if visitor should skip strings wrapped with
                HTML() or Text(), and False otherwise.
        """
        super(HtmlStringVisitor, self).__init__(file_contents, results)
        self.skip_wrapped_html = skip_wrapped_html
        # String nodes whose contents look like raw HTML.
        self.unsafe_html_string_nodes = []
        # String nodes containing escaped entities (e.g. "&amp;").
        self.over_escaped_entity_string_nodes = []
        # Set to True when an HTML()/Text() call is encountered while skipping.
        self.has_text_or_html_call = False

    def visit_Str(self, node):
        """
        When strings are visited, checks if it contains HTML.

        Arguments:
            node: An AST node.
        """
        # A '<' inside a regex named group (e.g. "(?P<group>)") is not HTML.
        has_named_group = re.search('[(][?]P<', node.s) is not None
        if not has_named_group and re.search('<', node.s) is not None:
            self.unsafe_html_string_nodes.append(node)
        if re.search(r"&[#]?[a-zA-Z0-9]+;", node.s) is not None:
            self.over_escaped_entity_string_nodes.append(node)

    def visit_Call(self, node):
        """
        Skips processing of string contained inside HTML() and Text() calls when
        skip_wrapped_html is True.

        Arguments:
            node: An AST node.
        """
        func = node.func
        is_html_or_text_call = isinstance(func, ast.Name) and func.id in ['HTML', 'Text']
        if self.skip_wrapped_html and is_html_or_text_call:
            self.has_text_or_html_call = True
        else:
            self.generic_visit(node)
class ContainsFormatVisitor(BaseVisitor):
    """
    Checks if there are any nested format() calls.

    This visitor is meant to be called on HTML() and Text() ast.Call nodes to
    search for any illegal nested format() calls.
    """
    def __init__(self, file_contents, results):
        """
        Init function.

        Arguments:
            file_contents: The contents of the Python file.
            results: A file results objects to which violations will be added.
        """
        super(ContainsFormatVisitor, self).__init__(file_contents, results)
        # Set to True as soon as a "format" attribute access is found.
        self.contains_format_call = False

    def visit_Attribute(self, node):
        """
        Simple check for format calls (attribute).

        Arguments:
            node: An AST node.
        """
        # Attribute(expr value, identifier attr, expr_context ctx)
        if node.attr == 'format':
            self.contains_format_call = True
        else:
            self.generic_visit(node)
class FormatInterpolateVisitor(BaseVisitor):
    """
    Checks if format() interpolates any HTML() or Text() calls. In other words,
    are Text() or HTML() calls nested inside the call to format().

    This visitor is meant to be called on a format() attribute node.
    """
    def __init__(self, file_contents, results):
        """
        Init function.

        Arguments:
            file_contents: The contents of the Python file.
            results: A file results objects to which violations will be added.
        """
        super(FormatInterpolateVisitor, self).__init__(file_contents, results)
        # True once an HTML() or Text() call is found among the arguments.
        self.interpolates_text_or_html = False
        # The caller (left-hand side) of the outer-most format() call.
        self.format_caller_node = None

    def visit_Call(self, node):
        """
        Checks all calls. Remembers the caller of the initial format() call, or
        in other words, the left-hand side of the call. Also tracks if HTML()
        or Text() calls were seen.

        Arguments:
            node: The AST root node.
        """
        # Compare with '==': identity ('is') on string literals is an interning
        # accident, not guaranteed behavior.
        if isinstance(node.func, ast.Attribute) and node.func.attr == 'format':
            if self.format_caller_node is None:
                # Store the caller, or left-hand-side node of the initial
                # format() call.
                self.format_caller_node = node.func.value
        elif isinstance(node.func, ast.Name) and node.func.id in ['HTML', 'Text']:
            # found Text() or HTML() call in arguments passed to format()
            self.interpolates_text_or_html = True
        self.generic_visit(node)

    def generic_visit(self, node):
        """
        Determines whether or not to continue to visit nodes according to the
        following rules:
        - Once a Text() or HTML() call has been found, stop visiting more nodes.
        - Skip the caller of the outer-most format() call, or in other words,
          the left-hand side of the call.

        Arguments:
            node: The AST root node.
        """
        if self.interpolates_text_or_html is False:
            if self.format_caller_node is not node:
                super(FormatInterpolateVisitor, self).generic_visit(node)
class OuterFormatVisitor(BaseVisitor):
    """
    Only visits outer most Python format() calls. These checks are not repeated
    for any nested format() calls.

    This visitor is meant to be used once from the root.
    """
    def visit_Call(self, node):
        """
        Checks that format() calls which contain HTML() or Text() use HTML() or
        Text() as the caller. In other words, Text() or HTML() must be used
        before format() for any arguments to format() that contain HTML() or
        Text().

        Arguments:
            node: An AST node.
        """
        is_format_call = isinstance(node.func, ast.Attribute) and node.func.attr == 'format'
        if not is_format_call:
            self.generic_visit(node)
            return
        html_string_visitor = HtmlStringVisitor(self.file_contents, self.results, True)
        html_string_visitor.visit(node)
        for unsafe_node in html_string_visitor.unsafe_html_string_nodes:
            self.results.violations.append(ExpressionRuleViolation(
                Rules.python_wrap_html, self.node_to_expression(unsafe_node)
            ))
        # Intentionally no generic_visit() here: child nodes of this format()
        # call were already handled by HtmlStringVisitor above.
class AllNodeVisitor(BaseVisitor):
    """
    Visits all nodes and does not interfere with calls to generic_visit(). This
    is used in conjunction with other visitors to check for a variety of
    violations.

    This visitor is meant to be used once from the root.
    """
    def visit_Attribute(self, node):
        """
        Checks for uses of deprecated `display_name_with_default_escaped`.

        Arguments:
            node: An AST node.
        """
        if node.attr == 'display_name_with_default_escaped':
            self.results.violations.append(ExpressionRuleViolation(
                Rules.python_deprecated_display_name, self.node_to_expression(node)
            ))
        self.generic_visit(node)

    def visit_Call(self, node):
        """
        Checks for a variety of violations:
        - Checks that format() calls with nested HTML() or Text() calls use
          HTML() or Text() on the left-hand side.
        - For each HTML() and Text() call, calls into separate visitor to check
          for inner format() calls.

        Arguments:
            node: An AST node.
        """
        if isinstance(node.func, ast.Attribute) and node.func.attr == 'format':
            # Look for HTML()/Text() calls nested in the format() arguments.
            visitor = FormatInterpolateVisitor(self.file_contents, self.results)
            visitor.visit(node)
            if visitor.interpolates_text_or_html:
                format_caller = node.func.value
                # Safe only when the caller is itself a direct Text()/HTML() call.
                is_caller_html_or_text = isinstance(format_caller, ast.Call) and \
                    isinstance(format_caller.func, ast.Name) and \
                    format_caller.func.id in ['Text', 'HTML']
                # If format call has nested Text() or HTML(), then the caller,
                # or left-hand-side of the format() call, must be a call to
                # Text() or HTML().
                if is_caller_html_or_text is False:
                    self.results.violations.append(ExpressionRuleViolation(
                        Rules.python_requires_html_or_text, self.node_to_expression(node.func)
                    ))
        elif isinstance(node.func, ast.Name) and node.func.id in ['HTML', 'Text']:
            # Text()/HTML() must not themselves contain a format() call.
            visitor = ContainsFormatVisitor(self.file_contents, self.results)
            visitor.visit(node)
            if visitor.contains_format_call:
                self.results.violations.append(ExpressionRuleViolation(
                    Rules.python_close_before_format, self.node_to_expression(node.func)
                ))
        self.generic_visit(node)

    def visit_BinOp(self, node):
        """
        Checks for concat using '+' and interpolation using '%' with strings
        containing HTML.

        Arguments:
            node: An AST node.
        """
        rule = None
        if isinstance(node.op, ast.Mod):
            rule = Rules.python_interpolate_html
        elif isinstance(node.op, ast.Add):
            rule = Rules.python_concat_html
        if rule is not None:
            # Check each side of the binary operation for HTML strings.
            visitor = HtmlStringVisitor(self.file_contents, self.results)
            visitor.visit(node.left)
            has_illegal_html_string = len(visitor.unsafe_html_string_nodes) > 0
            # Create new visitor to clear state.
            visitor = HtmlStringVisitor(self.file_contents, self.results)
            visitor.visit(node.right)
            has_illegal_html_string = has_illegal_html_string or len(visitor.unsafe_html_string_nodes) > 0
            if has_illegal_html_string:
                self.results.violations.append(ExpressionRuleViolation(
                    rule, self.node_to_expression(node)
                ))
        self.generic_visit(node)
class PythonLinter(BaseLinter):
    """
    The linter for Python files.

    The current implementation of the linter does naive Python parsing. It does
    not use the parser. One known issue is that parsing errors found inside a
    docstring need to be disabled, rather than being automatically skipped.
    Skipping docstrings is an enhancement that could be added.
    """
    # Delimiter that starts a single-line comment in Python.
    LINE_COMMENT_DELIM = "#"

    def __init__(self):
        """
        Init method.
        """
        super(PythonLinter, self).__init__()
        # Python-specific directories to skip, in addition to the shared ones.
        self._skip_python_dirs = SKIP_DIRS + ('tests', 'test/acceptance')

    def process_file(self, directory, file_name):
        """
        Process file to determine if it is a Python file and
        if it is safe.

        Arguments:
            directory (string): The directory of the file to be checked
            file_name (string): A filename for a potential Python file

        Returns:
            The file results containing any violations.
        """
        file_full_path = os.path.normpath(directory + '/' + file_name)
        results = FileResults(file_full_path)
        if not results.is_file:
            return results
        if not file_name.lower().endswith('.py'):
            return results
        # skip tests.py files
        # TODO: Add configuration for files and paths
        if file_name.lower().endswith('tests.py'):
            return results
        # skip this linter code (i.e. xss_linter.py)
        if file_name == os.path.basename(__file__):
            return results
        if not self._is_valid_directory(self._skip_python_dirs, directory):
            return results
        return self._load_and_check_file_is_safe(file_full_path, self.check_python_file_is_safe, results)

    def check_python_file_is_safe(self, file_contents, results):
        """
        Checks for violations in a Python file.

        Arguments:
            file_contents: The contents of the Python file.
            results: A file results objects to which violations will be added.
        """
        root_node = self.parse_python_code(file_contents, results)
        self.check_python_code_is_safe(file_contents, root_node, results)
        # Check rules specific to .py files only.
        # Note that in template files, the scope is different, so you can make
        # different assumptions.
        if root_node is not None:
            # check format() rules that can be run on outer-most format() calls
            visitor = OuterFormatVisitor(file_contents, results)
            visitor.visit(root_node)
        results.prepare_results(file_contents, line_comment_delim=self.LINE_COMMENT_DELIM)

    def check_python_code_is_safe(self, python_code, root_node, results):
        """
        Checks for violations in Python code snippet. This can also be used for
        Python that appears in files other than .py files, like in templates.

        Arguments:
            python_code: The contents of the Python code.
            root_node: The root node of the Python code parsed by AST.
            results: A file results objects to which violations will be added.
        """
        if root_node is not None:
            # check illegal concatenation and interpolation
            visitor = AllNodeVisitor(python_code, results)
            visitor.visit(root_node)
        # check rules parsed with regex
        self._check_custom_escape(python_code, results)

    def parse_python_code(self, python_code, results):
        """
        Parses Python code.

        Arguments:
            python_code: The Python code to be parsed.
            results: A file results objects to which violations will be added.

        Returns:
            The root node that was parsed, or None for SyntaxError.
        """
        python_code = self._strip_file_encoding(python_code)
        try:
            return ast.parse(python_code)
        except SyntaxError as e:
            if e.offset is None:
                expression = Expression(0)
            else:
                lines = StringLines(python_code)
                line_start_index = lines.line_number_to_start_index(e.lineno)
                expression = Expression(line_start_index + e.offset)
            results.violations.append(ExpressionRuleViolation(
                Rules.python_parse_error, expression
            ))
            return None

    def _strip_file_encoding(self, file_contents):
        """
        Removes file encoding from file_contents because the file was already
        read into Unicode, and the AST parser complains.

        Arguments:
            file_contents: The Python file contents.

        Returns:
            The Python file contents with the encoding stripped.
        """
        # PEP-263 Provides Regex for Declaring Encoding
        # Example: -*- coding: <encoding name> -*-
        # This is only allowed on the first two lines, and it must be stripped
        # before parsing, because we have already read into Unicode and the
        # AST parser complains.
        encoding_regex = re.compile(r"^[ \t\v]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)")
        encoding_match = encoding_regex.search(file_contents)
        # If encoding comment not found on first line, search second line.
        if encoding_match is None:
            lines = StringLines(file_contents)
            if lines.line_count() >= 2:
                encoding_match = encoding_regex.search(lines.line_number_to_line(2))
        # If encoding was found, strip it
        if encoding_match is not None:
            file_contents = file_contents.replace(encoding_match.group(), '#', 1)
        return file_contents

    def _check_custom_escape(self, file_contents, results):
        """
        Checks for custom escaping calls, rather than using a standard escaping
        method.

        Arguments:
            file_contents: The contents of the Python file
            results: A list of results into which violations will be added.
        """
        # Flags spans containing two '<' characters. The original pattern
        # "(<.*<|<.*<)" had two identical alternatives; a single alternative
        # produces exactly the same matches.
        # NOTE(review): the duplicated alternative suggests the pattern may
        # have been mangled (e.g. from HTML-entity forms) — confirm intent.
        for match in re.finditer("<.*<", file_contents):
            expression = Expression(match.start(), match.end())
            results.violations.append(ExpressionRuleViolation(
                Rules.python_custom_escape, expression
            ))
class MakoTemplateLinter(BaseLinter):
"""
The linter for Mako template files.
"""
LINE_COMMENT_DELIM = "##"
    def __init__(self):
        """
        Init method.
        """
        super(MakoTemplateLinter, self).__init__()
        # Delegate linters used for JavaScript and Python code embedded in
        # Mako templates.
        self.javascript_linter = JavaScriptLinter()
        self.python_linter = PythonLinter()
def process_file(self, directory, file_name):
"""
Process file to determine if it is a Mako template file and
if it is safe.
Arguments:
directory (string): The directory of the file to be checked
file_name (string): A filename for a potential Mako file
Returns:
The file results containing any violations.
"""
mako_file_full_path = os.path.normpath(directory + '/' + file_name)
results = FileResults(mako_file_full_path)
if not results.is_file:
return results
if not self._is_valid_directory(directory):
return results
# TODO: When safe-by-default is turned on at the platform level, will we:
# 1. Turn it on for .html only, or
# 2. Turn it on for all files, and have different rulesets that have
# different rules of .xml, .html, .js, .txt Mako templates (e.g. use
# the n filter to turn off h for some of these)?
# For now, we only check .html and .xml files
if not (file_name.lower().endswith('.html') or file_name.lower().endswith('.xml')):
return results
return self._load_and_check_file_is_safe(mako_file_full_path, self._check_mako_file_is_safe, results)
def _is_valid_directory(self, directory):
"""
Determines if the provided directory is a directory that could contain
Mako template files that need to be linted.
Arguments:
directory: The directory to be linted.
Returns:
True if this directory should be linted for Mako template violations
and False otherwise.
"""
if is_skip_dir(SKIP_DIRS, directory):
return False
# TODO: This is an imperfect guess concerning the Mako template
# directories. This needs to be reviewed before turning on safe by
# default at the platform level.
if ('/templates/' in directory) or directory.endswith('/templates'):
return True
return False
    def _check_mako_file_is_safe(self, mako_template, results):
        """
        Checks for violations in a Mako template.

        Arguments:
            mako_template: The contents of the Mako template.
            results: A file results objects to which violations will be added.
        """
        # Files that are actually Django templates are skipped entirely.
        if self._is_django_template(mako_template):
            return
        has_page_default = self._has_page_default(mako_template, results)
        self._check_mako_expressions(mako_template, has_page_default, results)
        self._check_mako_python_blocks(mako_template, has_page_default, results)
        results.prepare_results(mako_template, line_comment_delim=self.LINE_COMMENT_DELIM)
def _is_django_template(self, mako_template):
"""
Determines if the template is actually a Django template.
Arguments:
mako_template: The template code.
Returns:
True if this is really a Django template, and False otherwise.
"""
if re.search('({%.*%})|({{.*}})|({#.*#})', mako_template) is not None:
return True
return False
def _get_page_tag_count(self, mako_template):
"""
Determines the number of page expressions in the Mako template. Ignores
page expressions that are commented out.
Arguments:
mako_template: The contents of the Mako template.
Returns:
The number of page expressions
"""
count = len(re.findall('<%page ', mako_template, re.IGNORECASE))
count_commented = len(re.findall(r'##\s+<%page ', mako_template, re.IGNORECASE))
return max(0, count - count_commented)
def _has_page_default(self, mako_template, results):
"""
Checks if the Mako template contains the page expression marking it as
safe by default.
Arguments:
mako_template: The contents of the Mako template.
results: A list of results into which violations will be added.
Side effect:
Adds violations regarding page default if necessary
Returns:
True if the template has the page default, and False otherwise.
"""
page_tag_count = self._get_page_tag_count(mako_template)
# check if there are too many page expressions
if 2 <= page_tag_count:
results.violations.append(RuleViolation(Rules.mako_multiple_page_tags))
return False
# make sure there is exactly 1 page expression, excluding commented out
# page expressions, before proceeding
elif page_tag_count != 1:
results.violations.append(RuleViolation(Rules.mako_missing_default))
return False
# check that safe by default (h filter) is turned on
page_h_filter_regex = re.compile('<%page[^>]*expression_filter=(?:"h"|\'h\')[^>]*/>')
page_match = page_h_filter_regex.search(mako_template)
if not page_match:
results.violations.append(RuleViolation(Rules.mako_missing_default))
return page_match
    def _check_mako_expressions(self, mako_template, has_page_default, results):
        """
        Searches for Mako expressions and then checks if they contain
        violations, including checking JavaScript contexts for JavaScript
        violations.

        Arguments:
            mako_template: The contents of the Mako template.
            has_page_default: True if the page is marked as default, False
                otherwise.
            results: A list of results into which violations will be added.
        """
        expressions = self._find_mako_expressions(mako_template)
        contexts = self._get_contexts(mako_template)
        # Lint any JavaScript contexts before checking individual expressions.
        self._check_javascript_contexts(mako_template, contexts, results)
        for expression in expressions:
            if expression.end_index is None:
                # No closing delimiter was found for this expression.
                results.violations.append(ExpressionRuleViolation(
                    Rules.mako_unparseable_expression, expression
                ))
                continue
            context = self._get_context(contexts, expression.start_index)
            self._check_expression_and_filters(mako_template, expression, context, has_page_default, results)
def _check_javascript_contexts(self, mako_template, contexts, results):
"""
Lint the JavaScript contexts for JavaScript violations inside a Mako
template.
Arguments:
mako_template: The contents of the Mako template.
contexts: A list of context dicts with 'type' and 'index'.
results: A list of results into which violations will be added.
Side effect:
Adds JavaScript violations to results.
"""
javascript_start_index = None
for context in contexts:
if context['type'] == 'javascript':
if javascript_start_index < 0:
javascript_start_index = context['index']
else:
if javascript_start_index is not None:
javascript_end_index = context['index']
javascript_code = mako_template[javascript_start_index:javascript_end_index]
self._check_javascript_context(javascript_code, javascript_start_index, results)
javascript_start_index = None
if javascript_start_index is not None:
javascript_code = mako_template[javascript_start_index:]
self._check_javascript_context(javascript_code, javascript_start_index, results)
def _check_javascript_context(self, javascript_code, start_offset, results):
"""
Lint a single JavaScript context for JavaScript violations inside a Mako
template.
Arguments:
javascript_code: The template contents of the JavaScript context.
start_offset: The offset of the JavaScript context inside the
original Mako template.
results: A list of results into which violations will be added.
Side effect:
Adds JavaScript violations to results.
"""
javascript_results = FileResults("")
self.javascript_linter.check_javascript_file_is_safe(javascript_code, javascript_results)
self._shift_and_add_violations(javascript_results, start_offset, results)
def _check_mako_python_blocks(self, mako_template, has_page_default, results):
"""
Searches for Mako python blocks and checks if they contain
violations.
Arguments:
mako_template: The contents of the Mako template.
has_page_default: True if the page is marked as default, False
otherwise.
results: A list of results into which violations will be added.
"""
# Finds Python blocks such as <% ... %>, skipping other Mako start tags
# such as <%def> and <%page>.
python_block_regex = re.compile(r'<%\s(?P<code>.*?)%>', re.DOTALL)
for python_block_match in python_block_regex.finditer(mako_template):
self._check_expression_python(
python_code=python_block_match.group('code'),
start_offset=(python_block_match.start() + len('<% ')),
has_page_default=has_page_default,
results=results
)
def _check_expression_python(self, python_code, start_offset, has_page_default, results):
"""
Lint the Python inside a single Python expression in a Mako template.
Arguments:
python_code: The Python contents of an expression.
start_offset: The offset of the Python content inside the original
Mako template.
has_page_default: True if the page is marked as default, False
otherwise.
results: A list of results into which violations will be added.
Side effect:
Adds Python violations to results.
"""
python_results = FileResults("")
# Dedent expression internals so it is parseable.
# Note that the final columns reported could be off somewhat.
adjusted_python_code = textwrap.dedent(python_code)
first_letter_match = re.search('\w', python_code)
adjusted_first_letter_match = re.search('\w', adjusted_python_code)
if first_letter_match is not None and adjusted_first_letter_match is not None:
start_offset += (first_letter_match.start() - adjusted_first_letter_match.start())
python_code = adjusted_python_code
root_node = self.python_linter.parse_python_code(python_code, python_results)
self.python_linter.check_python_code_is_safe(python_code, root_node, python_results)
# Check mako expression specific Python rules.
if root_node is not None:
visitor = HtmlStringVisitor(python_code, python_results, True)
visitor.visit(root_node)
for unsafe_html_string_node in visitor.unsafe_html_string_nodes:
python_results.violations.append(ExpressionRuleViolation(
Rules.python_wrap_html, visitor.node_to_expression(unsafe_html_string_node)
))
if has_page_default:
for over_escaped_entity_string_node in visitor.over_escaped_entity_string_nodes:
python_results.violations.append(ExpressionRuleViolation(
Rules.mako_html_entities, visitor.node_to_expression(over_escaped_entity_string_node)
))
python_results.prepare_results(python_code, line_comment_delim=self.LINE_COMMENT_DELIM)
self._shift_and_add_violations(python_results, start_offset, results)
def _shift_and_add_violations(self, other_linter_results, start_offset, results):
    """
    Add results produced by a different linter to the Mako results,
    shifting each violation's offsets into the original Mako template.

    Arguments:
        other_linter_results: Results from another linter.
        start_offset: The offset of the linted code, a part of the
            template, inside the original Mako template.
        results: A list of results into which violations will be added.

    Side effect:
        Adds violations to results.
    """
    # Translate each violation into its proper location within the
    # original Mako template before recording it.
    for violation in other_linter_results.violations:
        shifted_expression = violation.expression
        shifted_expression.start_index += start_offset
        if shifted_expression.end_index is not None:
            shifted_expression.end_index += start_offset
        results.violations.append(ExpressionRuleViolation(violation.rule, shifted_expression))
def _check_expression_and_filters(self, mako_template, expression, context, has_page_default, results):
    """
    Check that the filters used in the given Mako expression are valid for
    the given context. Adds a violation to results if there is a problem.

    Arguments:
        mako_template: The contents of the Mako template.
        expression: A Mako Expression.
        context: The context of the page in which the expression was found
            (e.g. javascript, html).
        has_page_default: True if the page is marked as default, False
            otherwise.
        results: A list of results into which violations will be added.
    """
    if context == 'unknown':
        results.violations.append(ExpressionRuleViolation(
            Rules.mako_unknown_context, expression
        ))
        return

    # Example: finds "| n, h}" when given "${x | n, h}"
    filters_match = re.search(r'\|([.,\w\s]*)\}', expression.expression)

    # Check the Python code inside the expression: everything between the
    # "${" delimiter and either the filters or the closing "}".
    if filters_match is None:
        inner_code = expression.expression[2:-1]
    else:
        inner_code = expression.expression[2:filters_match.start()]
    self._check_expression_python(inner_code, expression.start_index + 2, has_page_default, results)

    # Check the filters.
    if filters_match is None:
        if context == 'javascript':
            results.violations.append(ExpressionRuleViolation(
                Rules.mako_invalid_js_filter, expression
            ))
        return

    filters = filters_match.group(1).replace(" ", "").split(",")
    if filters == ['n', 'decode.utf8']:
        # {x | n, decode.utf8} is valid in any context
        return
    if context == 'html':
        if filters == ['h']:
            if has_page_default:
                # suppress this violation if the page default hasn't been set,
                # otherwise the template might get less safe
                results.violations.append(ExpressionRuleViolation(
                    Rules.mako_unwanted_html_filter, expression
                ))
        else:
            results.violations.append(ExpressionRuleViolation(
                Rules.mako_invalid_html_filter, expression
            ))
    elif context == 'javascript':
        self._check_js_expression_not_with_html(mako_template, expression, results)
        if filters == ['n', 'dump_js_escaped_json']:
            # {x | n, dump_js_escaped_json} is valid
            pass
        elif filters == ['n', 'js_escaped_string']:
            # {x | n, js_escaped_string} is valid, if surrounded by quotes
            self._check_js_string_expression_in_quotes(mako_template, expression, results)
        else:
            results.violations.append(ExpressionRuleViolation(
                Rules.mako_invalid_js_filter, expression
            ))
def _check_js_string_expression_in_quotes(self, mako_template, expression, results):
    """
    Check that a Mako expression using js_escaped_string is surrounded by
    quotes.

    Arguments:
        mako_template: The contents of the Mako template.
        expression: A Mako Expression.
        results: A list of results into which violations will be added.
    """
    wrapping_string = self._find_string_wrapping_expression(mako_template, expression)
    # No wrapping string found means the expression is unquoted.
    if wrapping_string is None:
        results.violations.append(ExpressionRuleViolation(
            Rules.mako_js_missing_quotes, expression
        ))
def _check_js_expression_not_with_html(self, mako_template, expression, results):
    """
    Check that a Mako expression in a JavaScript context does not appear in
    a string that also contains HTML.

    Arguments:
        mako_template: The contents of the Mako template.
        expression: A Mako Expression.
        results: A list of results into which violations will be added.
    """
    wrapping_string = self._find_string_wrapping_expression(mako_template, expression)
    # Angle brackets in the wrapping string are treated as evidence of HTML.
    if wrapping_string is not None and re.search(r'[<>]', wrapping_string.string):
        results.violations.append(ExpressionRuleViolation(
            Rules.mako_js_html_string, expression
        ))
def _find_string_wrapping_expression(self, mako_template, expression):
    """
    Find the string wrapping the Mako expression, if there is one.

    Arguments:
        mako_template: The contents of the Mako template.
        expression: A Mako Expression.

    Returns:
        ParseString representing a scrubbed version of the wrapped string,
        where the Mako expression was replaced with "${...}", if a wrapped
        string was found. Otherwise None.
    """
    if expression.end_index is None:
        return None
    lines = StringLines(mako_template)
    line_start_index = lines.index_to_line_start_index(expression.start_index)
    line_end_index = lines.index_to_line_end_index(expression.end_index)
    # Scrub out the actual expression so any code inside the expression
    # doesn't interfere with rules applied to the surrounding code (i.e.
    # checking JavaScript).
    scrubbed_lines = "".join((
        mako_template[line_start_index:expression.start_index],
        "${...}",
        mako_template[expression.end_index:line_end_index]
    ))
    adjusted_start_index = expression.start_index - line_start_index
    search_index = 0
    while True:
        parse_string = ParseString(scrubbed_lines, search_index, len(scrubbed_lines))
        # Stop as soon as no further valid string could be parsed.
        if not (0 <= parse_string.start_index < parse_string.end_index):
            return None
        # Is the expression contained in this string?
        if parse_string.start_index < adjusted_start_index < parse_string.end_index:
            return parse_string
        # Otherwise move on to the next string on the line(s).
        search_index = parse_string.end_index
def _get_contexts(self, mako_template):
    """
    Return a data structure that represents the indices at which the
    template changes from HTML context to JavaScript and back.

    Return:
        A list of dicts where each dict contains:
        - index: the index of the context.
        - type: the context type (e.g. 'html' or 'javascript').
    """
    contexts_re = re.compile(
        r"""
            <script.*?> |                           # script tag start
            </script> |                             # script tag end
            <%static:require_module(_async)?.*?> |  # require js script tag start (optionally the _async version)
            </%static:require_module(_async)?> |    # require js script tag end (optionally the _async version)
            <%static:webpack.*?> |                  # webpack script tag start
            </%static:webpack> |                    # webpack script tag end
            <%block[ ]*name=['"]requirejs['"]\w*> | # require js tag start
            </%block>                               # require js tag end
        """,
        re.VERBOSE | re.IGNORECASE
    )
    media_type_re = re.compile(r"""type=['"].*?['"]""", re.IGNORECASE)

    # The template always begins in HTML context.
    contexts = [{'index': 0, 'type': 'html'}]
    javascript_types = [
        'text/javascript', 'text/ecmascript', 'application/ecmascript', 'application/javascript',
        'text/x-mathjax-config', 'json/xblock-args', 'application/json',
    ]
    html_types = ['text/template']
    for tag_match in contexts_re.finditer(mako_template):
        matched_text = tag_match.group().lower()
        if matched_text.startswith("<script"):
            # An opening <script>: the resulting context depends on the
            # media type attribute (e.g. type="text/javascript").
            context_type = 'javascript'
            type_attr_match = media_type_re.search(matched_text)
            if type_attr_match is not None:
                media_type = type_attr_match.group()[6:-1].lower()
                if media_type in html_types:
                    context_type = 'html'
                elif media_type not in javascript_types:
                    context_type = 'unknown'
            contexts.append({'index': tag_match.end(), 'type': context_type})
        elif matched_text.startswith("</"):
            # Any closing tag returns the template to HTML context.
            contexts.append({'index': tag_match.start(), 'type': 'html'})
        else:
            contexts.append({'index': tag_match.end(), 'type': 'javascript'})
    return contexts
def _get_context(self, contexts, index):
    """
    Get the context (e.g. javascript, html) of the template at the given
    index.

    Arguments:
        contexts: A list of dicts where each dict contains the 'index' of
            the context and the context 'type' (e.g. 'html' or
            'javascript').
        index: The index for which we want the context.

    Returns:
        The context (e.g. javascript or html) for the given index.
    """
    # Walk the (ordered) context list and keep the last entry whose start
    # index does not lie past the requested index.
    current_context = contexts[0]['type']
    for entry in contexts:
        if entry['index'] > index:
            break
        current_context = entry['type']
    return current_context
def _find_mako_expressions(self, mako_template):
    """
    Find all the Mako expressions in a Mako template and create an
    Expression for each.

    Arguments:
        mako_template: The content of the Mako template.

    Returns:
        A list of Expressions.
    """
    start_delim = '${'
    search_index = 0
    expressions = []
    while True:
        search_index = mako_template.find(start_delim, search_index)
        if search_index < 0:
            break
        # If the start of the mako expression is commented out, skip it.
        uncommented_start_index = self._uncommented_start_index(mako_template, search_index)
        if uncommented_start_index != search_index:
            search_index = uncommented_start_index
            continue
        result = self._find_closing_char_index(
            start_delim, '{', '}', mako_template, start_index=search_index + len(start_delim)
        )
        if result is None:
            # Parsing error: record an unterminated expression and restart
            # the search right after the start of the current expression.
            expression = Expression(search_index)
            search_index += len(start_delim)
        else:
            close_char_index = result['close_char_index']
            expression = Expression(
                search_index,
                end_index=close_char_index + 1,
                template=mako_template,
                start_delim=start_delim,
                end_delim='}',
                strings=result['strings'],
            )
            # Restart the search after the current expression.
            search_index = expression.end_index
        expressions.append(expression)
    return expressions
# Directory (sub)path fragments that should never be linted or descended
# into; consumed by is_skip_dir() below.
SKIP_DIRS = (
    '.git',
    '.pycharm_helpers',
    'common/static/xmodule/modules',
    'common/static/bundles',
    'perf_tests',
    'node_modules',
    'reports/diff_quality',
    'scripts/tests/templates',
    'spec',
    'test_root',
    'vendor',
)
def is_skip_dir(skip_dirs, directory):
    """
    Determine whether a directory should be skipped rather than linted.

    Arguments:
        skip_dirs: The configured directory (sub)paths to be skipped.
        directory: The current directory to be tested.

    Returns:
        True if the directory should be skipped, and False otherwise.
    """
    for skip_dir in skip_dirs:
        # Match the skip path as whole path components anywhere in the
        # directory path. fullmatch anchors both ends of the string; the
        # previous re.match only anchored the start, so e.g. 'spec'
        # incorrectly matched a directory named 'specs'.
        skip_dir_regex = re.compile(
            "(.*/)*{}(/.*)*".format(re.escape(skip_dir)))
        if skip_dir_regex.fullmatch(directory) is not None:
            return True
    return False
def _process_file(full_path, template_linters, options, summary_results, out):
    """
    For each linter, lints the provided file. This means finding and
    printing violations.

    Arguments:
        full_path: The full path of the file to lint.
        template_linters: A list of linting objects.
        options: A list of the options.
        summary_results: A SummaryResults with a summary of the violations.
        out: output file
    """
    # (Removed the unused `num_violations` local that was never read.)
    directory = os.path.dirname(full_path)
    file_name = os.path.basename(full_path)
    for template_linter in template_linters:
        results = template_linter.process_file(directory, file_name)
        results.print_results(options, summary_results, out)
def _process_os_dir(directory, files, template_linters, options, summary_results, out):
    """
    Lint each file in the passed list of files.

    Arguments:
        directory: Directory being linted.
        files: All files in the directory to be linted.
        template_linters: A list of linting objects.
        options: A list of the options.
        summary_results: A SummaryResults with a summary of the violations.
        out: output file
    """
    # Lint in case-insensitive alphabetical order for stable output.
    for file_name in sorted(files, key=str.lower):
        _process_file(os.path.join(directory, file_name), template_linters, options, summary_results, out)
def _process_os_dirs(starting_dir, template_linters, options, summary_results, out):
    """
    For each linter, lints all the directories in the starting directory.

    Arguments:
        starting_dir: The initial directory to begin the walk.
        template_linters: A list of linting objects.
        options: A list of the options.
        summary_results: A SummaryResults with a summary of the violations.
        out: output file
    """
    for root, dirs, files in os.walk(starting_dir):
        if is_skip_dir(SKIP_DIRS, root):
            # Prune the walk in place: os.walk() only honors mutations of
            # the `dirs` list itself. The previous `del dirs` merely
            # removed the local name and still descended into every
            # subdirectory of a skipped directory.
            dirs[:] = []
            continue
        dirs.sort(key=lambda s: s.lower())
        _process_os_dir(root, files, template_linters, options, summary_results, out)
def _lint(file_or_dir, template_linters, options, summary_results, out):
    """
    For each linter, lints the provided file or directory.

    Arguments:
        file_or_dir: The file or initial directory to lint.
        template_linters: A list of linting objects.
        options: A list of the options.
        summary_results: A SummaryResults with a summary of the violations.
        out: output file
    """
    if file_or_dir is not None and os.path.isfile(file_or_dir):
        # A single explicit file: lint just that file.
        _process_file(file_or_dir, template_linters, options, summary_results, out)
    else:
        # A directory (defaulting to the current one): walk recursively.
        directory = "."
        if file_or_dir is not None:
            if not os.path.exists(file_or_dir):
                raise ValueError("Path [{}] is not a valid file or directory.".format(file_or_dir))
            directory = file_or_dir
        _process_os_dirs(directory, template_linters, options, summary_results, out)
    summary_results.print_results(options, out)
def main():
    """
    Entry point for the linter. Use the --help option for usage details.

    Prints all violations found under the given path (or the current
    directory by default).
    """
    epilog = (
        "For more help using the xss linter, including details on how to\n"
        "understand and fix any violations, read the docs here:\n"
        "\n"
        # pylint: disable=line-too-long
        " http://edx.readthedocs.org/projects/edx-developer-guide/en/latest/conventions/preventing_xss.html#xss-linter\n"
    )
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Checks that templates are safe.',
        epilog=epilog,
    )
    arg_parser.add_argument(
        '--list-files', dest='list_files', action='store_true',
        help='Only display the filenames that contain violations.'
    )
    arg_parser.add_argument(
        '--rule-totals', dest='rule_totals', action='store_true',
        help='Display the totals for each rule.'
    )
    arg_parser.add_argument(
        '--verbose', dest='verbose', action='store_true',
        help='Print multiple lines where possible for additional context of violations.'
    )
    arg_parser.add_argument('path', nargs="?", default=None, help='A file to lint or directory to recursively lint.')
    args = arg_parser.parse_args()

    options = {
        'list_files': args.list_files,
        'rule_totals': args.rule_totals,
        'verbose': args.verbose,
    }
    template_linters = [MakoTemplateLinter(), UnderscoreTemplateLinter(), JavaScriptLinter(), PythonLinter()]
    summary_results = SummaryResults()
    _lint(args.path, template_linters, options, summary_results, out=sys.stdout)
# Execute the linter when run as a script.
if __name__ == "__main__":
    main()
| Lektorium-LLC/edx-platform | scripts/xss_linter.py | Python | agpl-3.0 | 101,168 | [
"VisIt"
] | a4131fc4c6e135a5092eb9749c7a18142959c49e9ca7e87c4bd7a5b7f8356611 |
#!/usr/bin/env python
# Mesa 3-D graphics library
#
# Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Generate the mesa.def file for Windows.
#
# Usage:
# mesadef.py >mesa.def
# Then copy to src/mesa/drivers/windows/gdi
#
# Dependencies:
# The apispec file must be in the current directory.
import apiparser
import string
def PrintHead():
    """Print the fixed header of the module definition (.def) file to stdout.

    The header explains why decorated STDCALL names must be re-exported
    undecorated, and ends with the EXPORTS keyword that the per-function
    entries (emitted by EmitEntry) follow.
    """
    # NOTE: this file is Python 2 (print statements).
    print '; DO NOT EDIT - This file generated automatically by mesadef.py script'
    print 'DESCRIPTION \'Mesa (OpenGL work-alike) for Win32\''
    print 'VERSION 6.0'
    print ';'
    print '; Module definition file for Mesa (OPENGL32.DLL)'
    print ';'
    print '; Note: The OpenGL functions use the STDCALL'
    print '; function calling convention.  Microsoft\'s'
    print '; OPENGL32 uses this convention and so must the'
    print '; Mesa OPENGL32 so that the Mesa DLL can be used'
    print '; as a drop-in replacement.'
    print ';'
    print '; The linker exports STDCALL entry points with'
    print '; \'decorated\' names; e.g., _glBegin@0, where the'
    print '; trailing number is the number of bytes of '
    print '; parameter data pushed onto the stack.  The'
    print '; callee is responsible for popping this data'
    print '; off the stack, usually via a RETF n instruction.'
    print ';'
    print '; However, the Microsoft OPENGL32.DLL does not export'
    print '; the decorated names, even though the calling convention'
    print '; is STDCALL.  So, this module definition file is'
    print '; needed to force the Mesa OPENGL32.DLL to export the'
    print '; symbols in the same manner as the Microsoft DLL.'
    print '; Were it not for this problem, this file would not'
    print '; be needed (for the gl* functions) since the entry'
    print '; points are compiled with dllexport declspec.'
    print ';'
    print '; However, this file is still needed to export "internal"'
    print '; Mesa symbols for the benefit of the OSMESA32.DLL.'
    print ';'
    print 'EXPORTS'
    return
#enddef
def PrintTail():
    """Print the fixed tail of the .def file: the WGL API exports followed
    by the Mesa-internal symbols needed by OSMESA32.DLL.
    """
    print ';'
    print '; WGL API'
    print '\twglChoosePixelFormat'
    print '\twglCopyContext'
    print '\twglCreateContext'
    print '\twglCreateLayerContext'
    print '\twglDeleteContext'
    print '\twglDescribeLayerPlane'
    print '\twglDescribePixelFormat'
    print '\twglGetCurrentContext'
    print '\twglGetCurrentDC'
    print '\twglGetExtensionsStringARB'
    print '\twglGetLayerPaletteEntries'
    print '\twglGetPixelFormat'
    print '\twglGetProcAddress'
    print '\twglMakeCurrent'
    print '\twglRealizeLayerPalette'
    print '\twglSetLayerPaletteEntries'
    print '\twglSetPixelFormat'
    print '\twglShareLists'
    print '\twglSwapBuffers'
    print '\twglSwapLayerBuffers'
    print '\twglUseFontBitmapsA'
    print '\twglUseFontBitmapsW'
    print '\twglUseFontOutlinesA'
    print '\twglUseFontOutlinesW'
    print ';'
    print '; Mesa internals - mostly for OSMESA'
    print '\t_ac_CreateContext'
    print '\t_ac_DestroyContext'
    print '\t_ac_InvalidateState'
    print '\t_glapi_get_context'
    print '\t_glapi_get_proc_address'
    print '\t_mesa_buffer_data'
    print '\t_mesa_buffer_map'
    print '\t_mesa_buffer_subdata'
    print '\t_mesa_choose_tex_format'
    print '\t_mesa_compressed_texture_size'
    print '\t_mesa_create_framebuffer'
    print '\t_mesa_create_visual'
    print '\t_mesa_delete_buffer_object'
    print '\t_mesa_delete_texture_object'
    print '\t_mesa_destroy_framebuffer'
    print '\t_mesa_destroy_visual'
    print '\t_mesa_enable_1_3_extensions'
    print '\t_mesa_enable_1_4_extensions'
    print '\t_mesa_enable_1_5_extensions'
    print '\t_mesa_enable_sw_extensions'
    print '\t_mesa_error'
    print '\t_mesa_free_context_data'
    print '\t_mesa_get_current_context'
    print '\t_mesa_init_default_imports'
    print '\t_mesa_initialize_context'
    print '\t_mesa_make_current'
    print '\t_mesa_new_buffer_object'
    print '\t_mesa_new_texture_object'
    print '\t_mesa_problem'
    print '\t_mesa_store_compressed_teximage1d'
    print '\t_mesa_store_compressed_teximage2d'
    print '\t_mesa_store_compressed_teximage3d'
    print '\t_mesa_store_compressed_texsubimage1d'
    print '\t_mesa_store_compressed_texsubimage2d'
    print '\t_mesa_store_compressed_texsubimage3d'
    print '\t_mesa_store_teximage1d'
    print '\t_mesa_store_teximage2d'
    print '\t_mesa_store_teximage3d'
    print '\t_mesa_store_texsubimage1d'
    print '\t_mesa_store_texsubimage2d'
    print '\t_mesa_store_texsubimage3d'
    print '\t_mesa_test_proxy_teximage'
    print '\t_mesa_Viewport'
    print '\t_mesa_meta_CopyColorSubTable'
    print '\t_mesa_meta_CopyColorTable'
    print '\t_mesa_meta_CopyConvolutionFilter1D'
    print '\t_mesa_meta_CopyConvolutionFilter2D'
    print '\t_mesa_meta_CopyTexImage1D'
    print '\t_mesa_meta_CopyTexImage2D'
    print '\t_mesa_meta_CopyTexSubImage1D'
    print '\t_mesa_meta_CopyTexSubImage2D'
    print '\t_mesa_meta_CopyTexSubImage3D'
    print '\t_swrast_Accum'
    print '\t_swrast_alloc_buffers'
    print '\t_swrast_Bitmap'
    print '\t_swrast_CopyPixels'
    print '\t_swrast_DrawPixels'
    print '\t_swrast_GetDeviceDriverReference'
    print '\t_swrast_Clear'
    print '\t_swrast_choose_line'
    print '\t_swrast_choose_triangle'
    print '\t_swrast_CreateContext'
    print '\t_swrast_DestroyContext'
    print '\t_swrast_InvalidateState'
    print '\t_swrast_ReadPixels'
    print '\t_swrast_zbuffer_address'
    print '\t_swsetup_Wakeup'
    print '\t_swsetup_CreateContext'
    print '\t_swsetup_DestroyContext'
    print '\t_swsetup_InvalidateState'
    print '\t_tnl_CreateContext'
    print '\t_tnl_DestroyContext'
    print '\t_tnl_InvalidateState'
    print '\t_tnl_MakeCurrent'
    print '\t_tnl_run_pipeline'
#enddef
# (name, dispatch name, offset) triples recorded by EmitEntry() so later
# entries can resolve an alias's offset.
records = []


def FindOffset(funcName):
    """Return the dispatch offset recorded for funcName, or -1 if unknown."""
    for record in records:
        if record[0] == funcName:
            return record[2]
    return -1
def EmitEntry(name, returnType, argTypeList, argNameList, alias, offset):
    """Emit one export line for a GL entry point.

    Invoked by apiparser.ProcessSpecFile() for each function in the spec.
    Prints a '\\tgl<name>' export line when a dispatch offset is known and
    the name is not an "unused" placeholder slot.
    """
    if alias == '':
        dispatchName = name
    else:
        dispatchName = alias
    if offset < 0:
        # No explicit offset: fall back to the offset previously recorded
        # for the dispatch (alias) name.
        offset = FindOffset(dispatchName)
    if offset >= 0 and string.find(name, "unused") == -1:
        print '\tgl%s' % (name)
    # save this info in case we need to look up an alias later
    records.append((name, dispatchName, offset))
#enddef
# Generate the .def file: fixed header, one export line per spec entry,
# then the fixed WGL/Mesa-internal tail.
PrintHead()
apiparser.ProcessSpecFile("APIspec", EmitEntry)
PrintTail()
| execunix/vinos | xsrc/external/mit/MesaLib/dist/src/mapi/glapi/gen/mesadef.py | Python | apache-2.0 | 7,558 | [
"Brian"
] | ef3874cb1180faf7b56a096cb27c4c65ff05886d3841e807054390346747df75 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to evaluating models with quantity parameters
"""
import numpy as np
import pytest
from numpy.testing import assert_allclose
from ..core import Model
from ..models import Gaussian1D, Shift, Scale, Pix2Sky_TAN
from ... import units as u
from ...units import UnitsError
from ...tests.helper import assert_quantity_allclose
# We start off by taking some simple cases where the units are defined by
# whatever the model is initialized with, and we check that the model evaluation
# returns quantities.
def test_evaluate_with_quantities():
    """
    Test evaluation of a single model with Quantity parameters that do
    not explicitly require units.
    """
    # We create two models here - one with quantities, and one without. The one
    # without is used to create the reference values for comparison.
    g = Gaussian1D(1, 1, 0.1)
    gq = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)

    # We first check that calling the Gaussian with quantities returns the
    # expected result
    assert_quantity_allclose(gq(1 * u.m), g(1) * u.J)

    # Units have to be specified for the Gaussian with quantities - if not, an
    # error is raised
    with pytest.raises(UnitsError) as exc:
        gq(1)
    assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', (dimensionless), could not be "
                                 "converted to required input units of m (length)")

    # However, zero is a special case
    assert_quantity_allclose(gq(0), g(0) * u.J)

    # We can also evaluate models with equivalent units
    assert_allclose(gq(0.0005 * u.km).value, g(0.5))

    # But not with incompatible units
    with pytest.raises(UnitsError) as exc:
        gq(3 * u.s)
    assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', s (time), could not be "
                                 "converted to required input units of m (length)")

    # We also can't evaluate the model without quantities with a quantity
    with pytest.raises(UnitsError) as exc:
        g(3 * u.m)
    # TODO: determine what error message should be here
    # assert exc.value.args[0] == ("Units of input 'x', m (length), could not be "
    #                              "converted to required dimensionless input")
def test_evaluate_with_quantities_and_equivalencies():
    """
    We now make sure that equivalencies are correctly taken into account
    """
    g = Gaussian1D(1 * u.Jy, 10 * u.nm, 2 * u.nm)

    # We aren't setting the equivalencies, so this won't work
    with pytest.raises(UnitsError) as exc:
        g(30 * u.PHz)
    assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', PHz (frequency), could "
                                 "not be converted to required input units of "
                                 "nm (length)")

    # But it should now work if we pass equivalencies when evaluating
    # (u.spectral converts 30 PHz to its equivalent wavelength in nm)
    assert_quantity_allclose(g(30 * u.PHz, equivalencies={'x': u.spectral()}),
                             g(9.993081933333332 * u.nm))
class MyTestModel(Model):
    # Minimal two-input model used by the unit tests below; evaluates to
    # the product of its two inputs.
    inputs = ('a', 'b')
    outputs = ('f',)

    def evaluate(self, a, b):
        # Debug output so failures show the values actually passed in.
        print('a', a)
        print('b', b)
        return a * b
class TestInputUnits():
    # Exercises input_units / return_units handling on MyTestModel.

    def setup_method(self, method):
        # Fresh model per test so attribute tweaks don't leak between tests.
        self.model = MyTestModel()

    def test_evaluate(self):
        # We should be able to evaluate with anything
        assert_quantity_allclose(self.model(3, 5), 15)
        assert_quantity_allclose(self.model(4 * u.m, 5), 20 * u.m)
        assert_quantity_allclose(self.model(3 * u.deg, 5), 15 * u.deg)

    def test_input_units(self):
        self.model.input_units = {'a': u.deg}
        assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
        assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad)
        assert_quantity_allclose(self.model(4 * u.rad, 2 * u.s), 8 * u.rad * u.s)
        with pytest.raises(UnitsError) as exc:
            self.model(4 * u.s, 3)
        assert exc.value.args[0] == ("MyTestModel: Units of input 'a', s (time), could not be "
                                     "converted to required input units of deg (angle)")
        with pytest.raises(UnitsError) as exc:
            self.model(3, 3)
        assert exc.value.args[0] == ("MyTestModel: Units of input 'a', (dimensionless), could "
                                     "not be converted to required input units of deg (angle)")

    def test_input_units_allow_dimensionless(self):
        self.model.input_units = {'a': u.deg}
        self.model.input_units_allow_dimensionless = True
        assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
        assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad)
        with pytest.raises(UnitsError) as exc:
            self.model(4 * u.s, 3)
        assert exc.value.args[0] == ("MyTestModel: Units of input 'a', s (time), could not be "
                                     "converted to required input units of deg (angle)")
        # Dimensionless input is accepted when allow_dimensionless is set.
        assert_quantity_allclose(self.model(3, 3), 9)

    def test_input_units_strict(self):
        self.model.input_units = {'a': u.deg}
        self.model.input_units_strict = True
        assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
        result = self.model(np.pi * u.rad, 2)
        assert_quantity_allclose(result, 360 * u.deg)
        # Strict mode converts the input, so the result carries u.deg.
        assert result.unit is u.deg

    def test_input_units_equivalencies(self):
        self.model.input_units = {'a': u.micron}
        with pytest.raises(UnitsError) as exc:
            self.model(3 * u.PHz, 3)
        assert exc.value.args[0] == ("MyTestModel: Units of input 'a', PHz (frequency), could "
                                     "not be converted to required input units of "
                                     "micron (length)")
        self.model.input_units_equivalencies = {'a': u.spectral()}
        assert_quantity_allclose(self.model(3 * u.PHz, 3),
                                 3 * (3 * u.PHz).to(u.micron, equivalencies=u.spectral()))

    def test_return_units(self):
        self.model.input_units = {'a': u.deg}
        self.model.return_units = {'f': u.rad}
        result = self.model(3 * u.deg, 4)
        assert_quantity_allclose(result, 12 * u.deg)
        assert result.unit is u.rad

    def test_return_units_scalar(self):
        # Check that return_units also works when giving a single unit since
        # there is only one output, so is unambiguous.
        self.model.input_units = {'a': u.deg}
        self.model.return_units = u.rad
        result = self.model(3 * u.deg, 4)
        assert_quantity_allclose(result, 12 * u.deg)
        assert result.unit is u.rad
def test_and_input_units():
    """
    Units are handled per-input for a '&' (parallel) model composition.
    """
    shift_a = Shift(10 * u.deg)
    shift_b = Shift(10 * u.deg)
    compound = shift_a & shift_b
    result = compound(10 * u.arcsecond, 20 * u.arcsecond)
    assert_quantity_allclose(result[0], 10 * u.deg + 10 * u.arcsec)
    assert_quantity_allclose(result[1], 10 * u.deg + 20 * u.arcsec)
def test_plus_input_units():
    """
    Units are handled for an arithmetic ('+') model composition.
    """
    shift_a = Shift(10 * u.deg)
    shift_b = Shift(10 * u.deg)
    summed = shift_a + shift_b
    result = summed(10 * u.arcsecond)
    assert_quantity_allclose(result, 20 * u.deg + 20 * u.arcsec)
def test_compound_input_units():
    """
    Units fed to the first model propagate through a '|' (serial) chain.
    """
    shift_a = Shift(10 * u.deg)
    shift_b = Shift(10 * u.deg)
    chained = shift_a | shift_b
    result = chained(10 * u.arcsecond)
    assert_quantity_allclose(result, 20 * u.deg + 10 * u.arcsec)
def test_compound_input_units_fail():
    """
    Incompatible input units to the first model in a chain raise UnitsError.
    """
    chained = Shift(10 * u.deg) | Shift(10 * u.deg)
    with pytest.raises(UnitsError):
        chained(10 * u.pix)
def test_compound_incompatible_units_fail():
    """
    Incompatible units between the chained models raise UnitsError on
    evaluation.
    """
    chained = Shift(10 * u.pix) | Shift(10 * u.deg)
    with pytest.raises(UnitsError):
        chained(10 * u.pix)
def test_compound_pipe_equiv_call():
    """
    Equivalencies passed at evaluation time work for a chained model
    (which has one input).
    """
    chained = Shift(10 * u.deg) | Shift(10 * u.deg)
    result = chained(10 * u.pix, equivalencies={'x': u.pixel_scale(0.5 * u.deg / u.pix)})
    assert_quantity_allclose(result, 25 * u.deg)
def test_compound_and_equiv_call():
    """
    Equivalencies passed at evaluation time work for a composite model
    with two inputs.
    """
    compound = Shift(10 * u.deg) & Shift(10 * u.deg)
    pix_to_deg = u.pixel_scale(0.5 * u.deg / u.pix)
    result = compound(10 * u.pix, 10 * u.pix,
                      equivalencies={'x0': pix_to_deg, 'x1': pix_to_deg})
    assert_quantity_allclose(result[0], 15 * u.deg)
    assert_quantity_allclose(result[1], 15 * u.deg)
def test_compound_input_units_equivalencies():
    """
    Test setting input_units_equivalencies on one of the models.
    """
    s1 = Shift(10 * u.deg)
    s1.input_units_equivalencies = {'x': u.pixel_scale(0.5 * u.deg / u.pix)}
    s2 = Shift(10 * u.deg)
    sp = Shift(10 * u.pix)

    # s1's equivalency applies when it is first in the chain...
    cs = s1 | s2
    out = cs(10 * u.pix)
    assert_quantity_allclose(out, 25 * u.deg)

    # ...and when it receives pixel output from an earlier model.
    cs = sp | s1
    out = cs(10 * u.pix)
    assert_quantity_allclose(out, 20 * u.deg)

    cs = s1 & s2
    cs = cs.rename('TestModel')
    out = cs(20 * u.pix, 10 * u.deg)
    assert_quantity_allclose(out, 20 * u.deg)

    # s2 has no equivalency, so pixels on its input must fail.
    with pytest.raises(UnitsError) as exc:
        out = cs(20 * u.pix, 10 * u.pix)
    assert exc.value.args[0] == "TestModel: Units of input 'x1', pix (unknown), could not be converted to required input units of deg (angle)"
def test_compound_input_units_strict():
    """
    Test setting input_units_strict on one of the models.
    """
    class ScaleDegrees(Scale):
        input_units = {'x': u.deg}

    scale_deg = ScaleDegrees(2)
    scale_plain = Scale(2)

    chained = scale_deg | scale_plain
    result = chained(10 * u.arcsec)
    assert_quantity_allclose(result, 40 * u.arcsec)
    assert result.unit is u.deg  # important since this tests input_units_strict

    chained = scale_plain | scale_deg
    result = chained(10 * u.arcsec)
    assert_quantity_allclose(result, 40 * u.arcsec)
    assert result.unit is u.deg  # important since this tests input_units_strict

    parallel = scale_deg & scale_plain
    result = parallel(10 * u.arcsec, 10 * u.arcsec)
    assert_quantity_allclose(result, 20 * u.arcsec)
    assert result[0].unit is u.deg
    assert result[1].unit is u.arcsec
def test_compound_input_units_allow_dimensionless():
    """
    Test setting input_units_allow_dimensionless on one of the models.
    """
    class ScaleDegrees(Scale):
        input_units = {'x': u.deg}

    s1 = ScaleDegrees(2)
    s1.input_units_allow_dimensionless = True
    s2 = Scale(2)

    # Dimensionless input is accepted when the first model allows it.
    cs = s1 | s2
    cs = cs.rename('TestModel')
    out = cs(10)
    assert_quantity_allclose(out, 40 * u.one)
    out = cs(10 * u.arcsec)
    assert_quantity_allclose(out, 40 * u.arcsec)
    with pytest.raises(UnitsError) as exc:
        out = cs(10 * u.m)
    assert exc.value.args[0] == "TestModel: Units of input 'x', m (length), could not be converted to required input units of deg (angle)"

    # With the flag cleared, dimensionless input fails again.
    s1.input_units_allow_dimensionless = False
    cs = s1 | s2
    cs = cs.rename('TestModel')
    with pytest.raises(UnitsError) as exc:
        out = cs(10)
    assert exc.value.args[0] == "TestModel: Units of input 'x', (dimensionless), could not be converted to required input units of deg (angle)"

    # Same checks with the unit-requiring model second in the chain.
    s1.input_units_allow_dimensionless = True
    cs = s2 | s1
    cs = cs.rename('TestModel')
    out = cs(10)
    assert_quantity_allclose(out, 40 * u.one)
    out = cs(10 * u.arcsec)
    assert_quantity_allclose(out, 40 * u.arcsec)
    with pytest.raises(UnitsError) as exc:
        out = cs(10 * u.m)
    assert exc.value.args[0] == "ScaleDegrees: Units of input 'x', m (length), could not be converted to required input units of deg (angle)"

    s1.input_units_allow_dimensionless = False
    cs = s2 | s1
    with pytest.raises(UnitsError) as exc:
        out = cs(10)
    assert exc.value.args[0] == "ScaleDegrees: Units of input 'x', (dimensionless), could not be converted to required input units of deg (angle)"

    s1.input_units_allow_dimensionless = True

    # Parallel composition: the flag applies per sub-model input.
    s1 = ScaleDegrees(2)
    s1.input_units_allow_dimensionless = True
    s2 = ScaleDegrees(2)
    s2.input_units_allow_dimensionless = False

    cs = s1 & s2
    cs = cs.rename('TestModel')
    out = cs(10, 10 * u.arcsec)
    assert_quantity_allclose(out[0], 20 * u.one)
    assert_quantity_allclose(out[1], 20 * u.arcsec)
    with pytest.raises(UnitsError) as exc:
        out = cs(10, 10)
    assert exc.value.args[0] == "TestModel: Units of input 'x1', (dimensionless), could not be converted to required input units of deg (angle)"
def test_compound_return_units():
    """
    Verify that ``return_units`` declared on the first model of a compound
    chain is applied to the values that are fed into the second model.
    """
    class EchoDegrees(Model):
        # Two-input / two-output pass-through model: strips the Quantity
        # wrapper in evaluate() and relies on return_units to re-attach deg.
        inputs = ('x', 'y')
        outputs = ('x', 'y')

        @property
        def input_units(self):
            """Units required on input."""
            return {'x': u.deg, 'y': u.deg}

        @property
        def return_units(self):
            """Units attached to the output."""
            return {'x': u.deg, 'y': u.deg}

        def evaluate(self, x, y):
            # Return bare floats; the framework must restore the degrees.
            return x.value, y.value

    pipeline = Pix2Sky_TAN() | EchoDegrees()
    assert_quantity_allclose(pipeline(0 * u.deg, 0 * u.deg), (0, 90) * u.deg)
| DougBurke/astropy | astropy/modeling/tests/test_quantities_evaluation.py | Python | bsd-3-clause | 13,490 | [
"Gaussian"
] | 3e7e6ed9490ca8754e76d7fded5f5aa069cb392d13ee88d07474bf007e30ebe2 |
# Copyright (C) 2018 Henrique Pereira Coutada Miranda
# All rights reserved.
#
# This file is part of yambopy
#
#
import unittest
import os
import shutil as sh
from yambopy.analyse import YamboAnalyser
from yambopy.io.outputfile import YamboOut
from qepy.lattice import Path
# Reference data folder holding the packed GW convergence run used by the tests.
test_path = os.path.join(os.path.dirname(__file__),'..','data','refs','gw_conv')
class TestYamboAnalyse(unittest.TestCase):
    """Exercise YamboAnalyser on a packed set of GW convergence outputs."""

    def setUp(self):
        """ Read the yambo GW output files
        """
        # Start every test from a pristine copy of the reference data.
        if os.path.isdir('gw_conv'): sh.rmtree('gw_conv')
        sh.copytree(test_path,'gw_conv')
        # Pack every folder that contains yambo output into .json files
        # so YamboAnalyser can pick them up.
        for dirpath,dirnames,filenames in os.walk('gw_conv'):
            if YamboOut.has_output(dirpath):
                y = YamboOut(dirpath,save_folder='gw_conv')
                y.pack()

    def test_yamboanalyse_gw_si(self):
        """ Analyse the yambo GW .json output files
        """
        y = YamboAnalyser('gw_conv')
        # No tag filter: every packed calculation should be found.
        netcdf_files = y.get_files_type('netcdf_gw')
        keys = sorted(netcdf_files.keys())
        assert keys == ['BndsRnXp_1_20', 'BndsRnXp_1_30', 'FFTGvecs_00010', 'FFTGvecs_00015',
                        'NGsBlkXp_00002', 'NGsBlkXp_00005', 'reference']
        # Single string tag filters by substring of the calculation name.
        netcdf_files = y.get_files_type('netcdf_gw','FFTGvecs')
        keys = sorted(netcdf_files.keys())
        assert keys == ['FFTGvecs_00010', 'FFTGvecs_00015']
        # A tuple of tags selects the union of the matches.
        netcdf_files = y.get_files_type('netcdf_gw',('FFTGvecs','reference'))
        keys = sorted(netcdf_files.keys())
        assert keys == ['FFTGvecs_00010', 'FFTGvecs_00015','reference']
        #test getting data
        ks_bands,qp_bands = y.get_bands(tags='reference')
        ks_bands.plot(show=False)
        print(ks_bands)
        qp_bands.plot(show=False)
        print(qp_bands)
        #test get_path
        path = Path([[[1.0,1.0,1.0],'G'],
                     [[0.0,0.5,0.5],'X'],
                     [[0.0,0.0,0.0],'G'],
                     [[0.5,0.0,0.0],'L']], [20,20,20])
        ks_bands_path,qp_bands_path = y.get_bands(tags='FFTGvecs',path_kpoints=path,type_calc=('gw'))
        qp_bands_path.plot(show=False)

    def tearDown(self):
        # Remove the scratch copy created in setUp.
        sh.rmtree('gw_conv')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| alexmoratalla/yambopy | yambopy/tests/test_yamboanalyse.py | Python | bsd-3-clause | 2,195 | [
"Yambo"
] | ded2440000d929307cb04bfd43ec23b572fa63914b88cfd6ca348cb86d0fa492 |
# -*- coding: utf-8 -*-
from parse import clauses
def parse_clause(text):
    """Parse *text* with the clause grammar and build its result via ClauseVisitor."""
    tree = clauses.grammar.parse(text)
    visitor = clauses.ClauseVisitor()
    return visitor.visit(tree)
| uclmr/inferbeddings | notebooks/simple/parse/base.py | Python | mit | 167 | [
"VisIt"
] | 18069be83cd2a9fabc94b0dedda066c9dfc9fd29691795a4a97c5255effce8ec |
from functools import reduce

import networkx

from claripy.utils.orderedset import OrderedSet

from ..misc.ux import deprecated

# errors
from ..errors import AngrForwardAnalysisError
# notices
from ..errors import AngrSkipJobNotice, AngrDelayJobNotice, AngrJobMergingFailureNotice, \
    AngrJobWideningFailureNotice
from .cfg.cfg_utils import CFGUtils
#
# Graph traversal
#
class GraphVisitor(object):
    """
    A graph visitor takes a node in the graph and returns its successors. Typically it visits a control flow graph, and
    returns successors of a CFGNode each time. This is the base class of all graph visitors.
    """
    def __init__(self):
        # Worklist of nodes awaiting a visit, kept in traversal order.
        self._sorted_nodes = OrderedSet()
        # node -> its position in the optimal traversal order (filled by reset()).
        self._node_to_index = { }
        # Nodes marked as having reached a fixed point; may be skipped by
        # all_successors(skip_reached_fixedpoint=True).
        self._reached_fixedpoint = set()

    #
    # Interfaces
    #

    def startpoints(self):
        """
        Get all start points to begin the traversal.

        :return: A list of startpoints that the traversal should begin with.
        """
        raise NotImplementedError()

    def successors(self, node):
        """
        Get successors of a node. The node should be in the graph.

        :param node: The node to work with.
        :return:     A list of successors.
        :rtype:      list
        """
        raise NotImplementedError()

    def predecessors(self, node):
        """
        Get predecessors of a node. The node should be in the graph.

        :param node: The node to work with.
        :return:     A list of predecessors.
        :rtype:      list
        """
        raise NotImplementedError()

    def sort_nodes(self, nodes=None):
        """
        Get a list of all nodes sorted in an optimal traversal order.

        :param iterable nodes: A collection of nodes to sort. If none, all nodes in the graph will be used to sort.
        :return:               A list of sorted nodes.
        :rtype:                list
        """
        raise NotImplementedError()

    #
    # Public methods
    #

    def nodes(self):
        """
        Return an iterator of nodes following an optimal traversal order.

        :return: An iterator over the sorted nodes.
        """
        sorted_nodes = self.sort_nodes()

        return iter(sorted_nodes)

    @deprecated(replacement='nodes')
    def nodes_iter(self):
        """
        (Deprecated) Return an iterator of nodes following an optimal traversal order. Will be removed in the future.
        """
        return self.nodes()

    # Traversal

    def reset(self):
        """
        Reset the internal node traversal state. Must be called prior to visiting future nodes.

        :return: None
        """
        self._sorted_nodes.clear()
        self._node_to_index.clear()
        self._reached_fixedpoint.clear()

        # Record each node's optimal traversal position and seed the worklist
        # with every node.
        for i, n in enumerate(self.sort_nodes()):
            self._node_to_index[n] = i
            self._sorted_nodes.add(n)

    def next_node(self):
        """
        Get the next node to visit.

        :return: A node in the graph, or None when the worklist is empty.
        """
        if not self._sorted_nodes:
            return None

        # Pop from the front so traversal follows the precomputed order.
        return self._sorted_nodes.pop(last=False)

    def all_successors(self, node, skip_reached_fixedpoint=False):
        """
        Returns all successors to the specific node.

        :param node: A node in the graph.
        :param bool skip_reached_fixedpoint: Do not descend into successors already marked as fixed points.
        :return: A set of nodes that are all successors to the given node.
        :rtype: set
        """
        successors = set()

        # Iterative depth-first walk over the transitive successor closure.
        stack = [ node ]
        while stack:
            n = stack.pop()
            successors.add(n)
            stack.extend(succ for succ in self.successors(n) if
                         succ not in successors and
                         (not skip_reached_fixedpoint or succ not in self._reached_fixedpoint)
                         )

        return successors

    def revisit(self, node, include_self=True):
        """
        Revisit a node in the future. As a result, the successors to this node will be revisited as well.

        :param node: The node to revisit in the future.
        :param bool include_self: Also queue the node itself, not only its successors.
        :return: None
        """
        successors = self.successors(node) #, skip_reached_fixedpoint=True)

        if include_self:
            self._sorted_nodes.add(node)

        for succ in successors:
            self._sorted_nodes.add(succ)

        # reorder it
        self._sorted_nodes = OrderedSet(sorted(self._sorted_nodes, key=lambda n: self._node_to_index[n]))

    def reached_fixedpoint(self, node):
        """
        Mark a node as reached fixed-point. This node as well as all its successors will not be visited in the future.

        :param node: The node to mark as reached fixed-point.
        :return: None
        """
        self._reached_fixedpoint.add(node)
class FunctionGraphVisitor(GraphVisitor):
    """Traverse the transition graph of a single function."""

    def __init__(self, func, graph=None):
        """
        :param knowledge.Function func: The function whose graph is traversed.
        :param graph:                   An optional graph to traverse instead of func.graph.
        """
        super(FunctionGraphVisitor, self).__init__()

        self.function = func

        if graph is None:
            self.graph = self.function.graph
        else:
            self.graph = graph

        self.reset()

    def startpoints(self):
        return [ self.function.startpoint ]

    def successors(self, node):
        return list(self.graph.successors(node))

    def predecessors(self, node):
        return list(self.graph.predecessors(node))

    def sort_nodes(self, nodes=None):
        sorted_nodes = CFGUtils.quasi_topological_sort_nodes(self.graph)

        if nodes is not None:
            # Build the membership set once. The original rebuilt set(nodes)
            # for every element of the comprehension, which made the filter
            # accidentally quadratic.
            nodes = set(nodes)
            sorted_nodes = [ n for n in sorted_nodes if n in nodes ]

        return sorted_nodes
class CallGraphVisitor(GraphVisitor):
    """Traverse a whole-program call graph."""

    def __init__(self, callgraph):
        """
        :param networkx.DiGraph callgraph: The call graph to traverse.
        """
        super(CallGraphVisitor, self).__init__()

        self.callgraph = callgraph

        self.reset()

    def startpoints(self):
        # Functions that nothing calls are the natural traversal roots.
        # TODO: make sure all connected components are covered
        start_nodes = [node for node in self.callgraph.nodes() if self.callgraph.in_degree(node) == 0]

        if not start_nodes:
            # randomly pick one. next(iter(...)) works with both networkx 1.x
            # (list) and 2.x (NodeView); indexing with [0] raises TypeError on
            # a NodeView.
            start_nodes = [ next(iter(self.callgraph.nodes())) ]

        return start_nodes

    def successors(self, node):
        return list(self.callgraph.successors(node))

    def predecessors(self, node):
        return list(self.callgraph.predecessors(node))

    def sort_nodes(self, nodes=None):
        sorted_nodes = CFGUtils.quasi_topological_sort_nodes(self.callgraph)

        if nodes is not None:
            # Hoist the set construction out of the comprehension; building it
            # per element made the filter accidentally quadratic.
            nodes = set(nodes)
            sorted_nodes = [ n for n in sorted_nodes if n in nodes ]

        return sorted_nodes
class SingleNodeGraphVisitor(GraphVisitor):
    """A degenerate visitor over a graph that contains exactly one node."""

    def __init__(self, node):
        """
        :param node: The single node that should be in the graph.
        """
        super(SingleNodeGraphVisitor, self).__init__()

        self.node = node
        self.reset()

    def startpoints(self):
        # NOTE(review): returns the node's address here while sort_nodes()
        # returns the node object itself — presumably intentional; confirm
        # against callers.
        return [self.node.addr]

    def successors(self, node):
        # The only node has no successors.
        return []

    def predecessors(self, node):
        # ... and no predecessors either.
        return []

    def sort_nodes(self, nodes=None):
        return nodes if nodes else [self.node]
#
# Job info
#
class JobInfo(object):
    """
    Bookkeeping record for all jobs stored under a single job key.

    Keeps the history of jobs recorded for the key, together with how each
    one was produced: '' (plain), 'merged', or 'widened'.
    """

    def __init__(self, key, job):
        self.key = key
        # History of (job, how-it-was-added) pairs; the last entry is current.
        self.jobs = [(job, '')]

        self.narrowing_count = 0  # not used

    def __hash__(self):
        # Identity of a JobInfo is its key only.
        return hash(self.key)

    def __eq__(self, o):
        return type(self) == type(o) and self.key == o.key

    def __repr__(self):
        return "<JobInfo %s>" % (str(self.key))

    @property
    def job(self):
        """
        Get the latest available job.

        :return: The latest available job.
        """
        latest, _ = self.jobs[-1]
        return latest

    @property
    def merged_jobs(self):
        """Iterate over all jobs that were produced by merging."""
        for job, how in self.jobs:
            if how == 'merged':
                yield job

    @property
    def widened_jobs(self):
        """Iterate over all jobs that were produced by widening."""
        for job, how in self.jobs:
            if how == 'widened':
                yield job

    def add_job(self, job, merged=False, widened=False):
        """
        Record a new job under this key.

        :param job:          The new job to append.
        :param bool merged:  Whether it is a merged job or not.
        :param bool widened: Whether it is a widened job or not.
        """
        how = 'merged' if merged else ('widened' if widened else '')
        self.jobs.append((job, how))
class ForwardAnalysis(object):
    """
    This is my very first attempt to build a static forward analysis framework that can serve as the base of multiple
    static analyses in angr, including CFG analysis, VFG analysis, DDG, etc.

    In short, ForwardAnalysis performs a forward data-flow analysis by traversing a graph, compute on abstract values,
    and store results in abstract states. The user can specify what graph to traverse, how a graph should be traversed,
    how abstract values and abstract states are defined, etc.

    ForwardAnalysis has a few options to toggle, making it suitable to be the base class of several different styles of
    forward data-flow analysis implementations.

    ForwardAnalysis supports a special mode when no graph is available for traversal (for example, when a CFG is being
    initialized and constructed, no other graph can be used). In that case, the graph traversal functionality is
    disabled, and the optimal graph traversal order is not guaranteed. The user can provide a job sorting method to
    sort the jobs in queue and optimize traversal order.

    Feel free to discuss with me (Fish) if you have any suggestions or complaints.
    """

    def __init__(self, order_jobs=False, allow_merging=False, allow_widening=False, status_callback=None,
                 graph_visitor=None
                 ):
        """
        Constructor

        :param bool order_jobs:     If all jobs should be ordered or not.
        :param bool allow_merging:  If job merging is allowed.
        :param bool allow_widening: If job widening is allowed.
        :param status_callback:     A callable invoked with this analysis at the top of each main-loop iteration; it
                                    may abort the analysis via abort().
        :param graph_visitor:       A graph visitor to provide successors.
        :type graph_visitor:        GraphVisitor or None
        :return: None
        """

        self._order_jobs = order_jobs
        self._allow_merging = allow_merging
        self._allow_widening = allow_widening
        self._status_callback = status_callback
        self._graph_visitor = graph_visitor

        # sanity checks
        if self._allow_widening and not self._allow_merging:
            raise AngrForwardAnalysisError('Merging must be allowed if widening is allowed.')

        # Analysis progress control
        self._should_abort = False

        # All remaining jobs
        self._job_info_queue = [ ]

        # A map between job key to job. Jobs with the same key will be merged by calling _merge_jobs()
        self._job_map = { }

        # A mapping between node and abstract state
        self._state_map = { }

        # The graph!
        # Analysis results (nodes) are stored here
        self._graph = networkx.DiGraph()

    #
    # Properties
    #

    @property
    def should_abort(self):
        """
        Should the analysis be terminated.

        :return: True/False
        """
        return self._should_abort

    @property
    def graph(self):
        return self._graph

    @property
    def jobs(self):
        for job_info in self._job_info_queue:
            yield job_info.job

    #
    # Public methods
    #

    def abort(self):
        """
        Abort the analysis

        :return: None
        """
        self._should_abort = True

    def has_job(self, job):
        """
        Checks whether there exists another job which has the same job key.

        :param job: The job to check.
        :return:    True if there exists another job with the same key, False otherwise.
        """
        job_key = self._job_key(job)
        return job_key in self._job_map

    #
    # Abstract interfaces
    #

    # Common interfaces

    def _pre_analysis(self):
        raise NotImplementedError('_pre_analysis() is not implemented.')

    def _intra_analysis(self):
        raise NotImplementedError('_intra_analysis() is not implemented.')

    def _post_analysis(self):
        raise NotImplementedError('_post_analysis() is not implemented.')

    def _job_key(self, job):
        raise NotImplementedError('_job_key() is not implemented.')

    def _get_successors(self, job):
        raise NotImplementedError('_get_successors() is not implemented.')

    def _pre_job_handling(self, job):
        raise NotImplementedError('_pre_job_handling() is not implemented.')

    def _post_job_handling(self, job, new_jobs, successors):
        raise NotImplementedError('_post_job_handling() is not implemented.')

    def _handle_successor(self, job, successor, successors):
        raise NotImplementedError('_handle_successor() is not implemented.')

    def _job_queue_empty(self):
        raise NotImplementedError('_job_queue_empty() is not implemented.')

    def _initial_abstract_state(self, node):
        raise NotImplementedError('_get_initial_abstract_state() is not implemented.')

    def _run_on_node(self, node, state):
        """
        The analysis routine that runs on each node in the graph.

        :param node:  A node in the graph.
        :param state: An abstract state that acts as the initial abstract state of this analysis routine.
        :return:      A tuple: (changed, output abstract state)
        """
        raise NotImplementedError('_run_on_node() is not implemented.')

    def _merge_states(self, node, *states):
        """
        Merge multiple abstract states into one.

        :param node:   A node in the graph.
        :param states: Abstract states to merge.
        :return:       A merged abstract state.
        """
        raise NotImplementedError('_merge_states() is not implemented.')

    def _widen_states(self, *states):
        raise NotImplementedError('_widen_states() is not implemented.')

    # Special interfaces for non-graph-traversal mode

    def _merge_jobs(self, *jobs):
        raise NotImplementedError('_merge_jobs() is not implemented.')

    def _should_widen_jobs(self, *jobs):
        raise NotImplementedError('_should_widen_jobs() is not implemented.')

    def _widen_jobs(self, *jobs):
        raise NotImplementedError('_widen_jobs() is not implemented.')

    def _job_sorting_key(self, job):
        raise NotImplementedError('_job_sorting_key() is not implemented.')

    #
    # Private methods
    #

    def _analyze(self):
        """
        The main analysis routine.

        :return: None
        """
        self._pre_analysis()

        if self._graph_visitor is None:
            # There is no base graph that we can rely on. The analysis itself should generate successors for the
            # current job.
            # An example is the CFG recovery.
            self._analysis_core_baremetal()
        else:
            # We have a base graph to follow. Just handle the current job.
            self._analysis_core_graph()

        self._post_analysis()

    def _analysis_core_graph(self):

        while not self.should_abort:

            self._intra_analysis()

            n = self._graph_visitor.next_node()

            if n is None:
                # all done!
                break

            job_state = self._pop_input_state(n)
            if job_state is None:
                job_state = self._initial_abstract_state(n)

            # bug fix: a second, unreachable "if n is None: break" used to sit
            # here; n was already checked right after next_node().

            changed, output_state = self._run_on_node(n, job_state)

            # output state of node n is input state for successors to node n
            self._add_input_state(n, output_state)

            if not changed:
                # reached a fixed point
                continue

            # add all successors
            self._graph_visitor.revisit(n, include_self=False)

    def _add_input_state(self, node, input_state):
        """
        Add the input state to all successors of the given node.

        :param node:        The node whose successors' input states will be touched.
        :param input_state: The state that will be added to successors of the node.
        :return:            None
        """
        successors = self._graph_visitor.successors(node)

        for succ in successors:
            if succ in self._state_map:
                self._state_map[succ] = self._merge_states(succ, *([ self._state_map[succ], input_state ]))
            else:
                self._state_map[succ] = input_state

    def _pop_input_state(self, node):
        """
        Get the input abstract state for this node, and remove it from the state map.

        :param node: The node in graph.
        :return:     A merged state, or None if there is no input state for this node available.
        """
        if node in self._state_map:
            return self._state_map.pop(node)
        return None

    def _merge_state_from_predecessors(self, node):
        """
        Get abstract states for all predecessors of the node, merge them, and return the merged state.

        :param node: The node in graph.
        :return:     A merged state, or None if no predecessor is available.
        """
        preds = self._graph_visitor.predecessors(node)

        states = [ self._state_map[n] for n in preds if n in self._state_map ]
        if not states:
            return None

        # bug fix: reduce() is a builtin only on Python 2; it is now imported
        # from functools at module level so this also works on Python 3.
        return reduce(lambda s0, s1: self._merge_states(node, s0, s1), states[1:], states[0])

    def _analysis_core_baremetal(self):

        if not self._job_info_queue:
            self._job_queue_empty()

        while not self.should_abort:

            if self._status_callback is not None:
                self._status_callback(self)

            # should_abort might be changed by the status callback function
            if self.should_abort:
                return

            if not self._job_info_queue:
                self._job_queue_empty()

            if not self._job_info_queue:
                # still no job available
                break

            job_info = self._job_info_queue[0]

            try:
                self._pre_job_handling(job_info.job)
            except AngrDelayJobNotice:
                # delay the handling of this job
                continue
            except AngrSkipJobNotice:
                # consume and skip this job
                self._job_info_queue = self._job_info_queue[1:]
                self._job_map.pop(self._job_key(job_info.job), None)
                continue

            # remove the job info from the map
            self._job_map.pop(self._job_key(job_info.job), None)

            self._job_info_queue = self._job_info_queue[1:]

            self._process_job_and_get_successors(job_info)

            # Short-cut for aborting the analysis
            if self.should_abort:
                break

            self._intra_analysis()

    def _process_job_and_get_successors(self, job_info):
        """
        Process a job, get all successors of this job, and call _handle_successor() to handle each successor.

        :param JobInfo job_info: The JobInfo instance
        :return:                 None
        """
        job = job_info.job

        successors = self._get_successors(job)

        all_new_jobs = [ ]

        for successor in successors:
            new_jobs = self._handle_successor(job, successor, successors)

            if new_jobs:
                all_new_jobs.extend(new_jobs)

                for new_job in new_jobs:
                    self._insert_job(new_job)

        self._post_job_handling(job, all_new_jobs, successors)

    def _insert_job(self, job):
        """
        Insert a new job into the job queue. If the job queue is ordered, this job will be inserted at the correct
        position.

        :param job: The job to insert
        :return:    None
        """
        key = self._job_key(job)

        if self._allow_merging:
            if key in self._job_map:
                job_info = self._job_map[key]

                # decide if we want to trigger a widening
                # if not, we'll simply do the merge
                # TODO: save all previous jobs for the sake of widening
                job_added = False
                if self._allow_widening and self._should_widen_jobs(job_info.job, job):
                    try:
                        widened_job = self._widen_jobs(job_info.job, job)
                        # remove the old job since now we have a widened one
                        if job_info in self._job_info_queue:
                            self._job_info_queue.remove(job_info)
                        job_info.add_job(widened_job, widened=True)
                        job_added = True
                    except AngrJobWideningFailureNotice:
                        # widening failed
                        # fall back to merging...
                        pass

                if not job_added:
                    try:
                        merged_job = self._merge_jobs(job_info.job, job)
                        # remove the old job since now we have a merged one
                        if job_info in self._job_info_queue:
                            self._job_info_queue.remove(job_info)
                        job_info.add_job(merged_job, merged=True)
                    except AngrJobMergingFailureNotice:
                        # merging failed
                        job_info = JobInfo(key, job)
                        # update the job map
                        self._job_map[key] = job_info
            else:
                job_info = JobInfo(key, job)
                self._job_map[key] = job_info
        else:
            job_info = JobInfo(key, job)
            self._job_map[key] = job_info

        if self._order_jobs:
            self._binary_insert(self._job_info_queue, job_info, lambda elem: self._job_sorting_key(elem.job))
        else:
            self._job_info_queue.append(job_info)

    def _peek_job(self, pos):
        """
        Return the job currently at position `pos`, but still keep it in the job queue. An IndexError will be raised
        if that position does not currently exist in the job list.

        :param int pos: Position of the job to get.
        :return:        The job
        """
        if pos < len(self._job_info_queue):
            return self._job_info_queue[pos].job

        raise IndexError()

    #
    # Utils
    #

    @staticmethod
    def _binary_insert(lst, elem, key, lo=0, hi=None):
        """
        Insert an element into a sorted list, and keep the list sorted.

        The major difference from bisect.bisect_left is that this function supports a key method, so user doesn't have
        to create the key array for each insertion.

        :param list lst:    The list. Must be pre-ordered.
        :param object elem: An element to insert into the list.
        :param func key:    A method to get the key for each element in the list.
        :param int lo:      Lower bound of the search.
        :param int hi:      Upper bound of the search.
        :return:            None
        """
        if lo < 0:
            raise ValueError("lo must be a non-negative number")

        if hi is None:
            hi = len(lst)

        while lo < hi:
            mid = (lo + hi) // 2
            if key(lst[mid]) < key(elem):
                lo = mid + 1
            else:
                hi = mid

        lst.insert(lo, elem)
| tyb0807/angr | angr/analyses/forward_analysis.py | Python | bsd-2-clause | 23,692 | [
"VisIt"
] | 46a12b504e39fa9405334b094b1902462f9aac1f02ce366b0c1351d4a6cdba08 |
import gtk
from gettext import gettext as _
import gobject
from ase.gui.widgets import pack
class Movie(gtk.Window):
    """Playback-control window for stepping and animating through the images
    held by the ase-gui main window.

    NOTE(review): this module targets Python 2 / PyGTK — it relies on the
    py2-only ``cmp`` builtin and on integer division of image counts.
    """

    def __init__(self, gui):
        gtk.Window.__init__(self)
        self.set_position(gtk.WIN_POS_NONE)
        self.connect('destroy', self.close)
        #self.connect('delete_event', self.exit2)
        self.set_title(_('Movie'))
        vbox = gtk.VBox()
        pack(vbox, gtk.Label(_('Image number:')))
        # Slider model over the valid frame indices [0, nimages - 1].
        self.frame_number = gtk.Adjustment(gui.frame, 0,
                                           gui.images.nimages - 1,
                                           1.0, 5.0)
        self.frame_number.connect('value-changed', self.new_frame)
        hscale = pack(vbox, gtk.HScale(self.frame_number))
        hscale.set_update_policy(gtk.UPDATE_CONTINUOUS)
        hscale.set_digits(0)
        # first / back / forward / last navigation buttons.
        buttons = [gtk.Button(stock=gtk.STOCK_GOTO_FIRST),
                   gtk.Button(stock=gtk.STOCK_GO_BACK),
                   gtk.Button(stock=gtk.STOCK_GO_FORWARD),
                   gtk.Button(stock=gtk.STOCK_GOTO_LAST)]
        buttons[0].connect('clicked', self.click, -1, True)
        buttons[1].connect('clicked', self.click, -1)
        buttons[2].connect('clicked', self.click, 1)
        buttons[3].connect('clicked', self.click, 1, True)
        pack(vbox, buttons)
        play = gtk.Button(_('Play'))
        play.connect('clicked', self.play)
        stop = gtk.Button(_('Stop'))
        stop.connect('clicked', self.stop)
        # TRANSLATORS: This function plays an animation forwards and backwards
        # alternatingly, e.g. for displaying vibrational movement
        self.rock = gtk.CheckButton(_('Rock'))
        pack(vbox, [play, stop, gtk.Label(' '), self.rock])
        # Defaults: long trajectories skip frames so roughly 150 frames are
        # drawn per pass, and the frame rate keeps a pass within ~5-30 s.
        if gui.images.nimages > 150:
            skipdefault = gui.images.nimages/150
            tdefault = min(max(gui.images.nimages/(skipdefault*5.0), 1.0), 30)
        else:
            skipdefault = 0
            tdefault = min(max(gui.images.nimages/5.0, 1.0), 30)
        self.time = gtk.Adjustment(tdefault, 1.0, 99, 0.1)
        self.time_spin = gtk.SpinButton(self.time, 0, 0)
        self.time_spin.set_digits(1)
        self.time.connect("value-changed",self.frame_rate_changed)
        self.skip = gtk.Adjustment(skipdefault, 0, 99, 1)
        self.skip_spin = gtk.SpinButton(self.skip, 0, 0)
        pack(vbox, [gtk.Label(_(' Frame rate: ')), self.time_spin,
                    gtk.Label(_(' Skip frames: ')), self.skip_spin,
                    gtk.Label(' ')])
        self.add(vbox)
        vbox.show()
        self.show()
        self.gui = gui
        #gui.m=self
        # +1 / -1: direction used by automatic stepping.
        self.direction = 1
        # gobject timer id while playing, None when stopped.
        self.id = None
        gui.register_vulnerable(self)

    def notify_atoms_changed(self):
        "Called by gui object when the atoms have changed."
        self.destroy()

    def close(self, event):
        # Make sure the playback timer is cancelled when the window goes away.
        self.stop()

    def click(self, button, step, firstlast=False):
        """Navigation-button handler; firstlast jumps to an end of the trajectory."""
        if firstlast and step < 0:
            i = 0
        elif firstlast:
            i = self.gui.images.nimages - 1
        else:
            # Clamp the stepped frame index into the valid range.
            i = max(0, min(self.gui.images.nimages - 1, self.gui.frame + step))
        self.gui.set_frame(i)
        self.frame_number.value = i
        # Remember the direction for subsequent automatic stepping
        # (cmp() is Python 2 only).
        if firstlast:
            self.direction = cmp(-step, 0)
        else:
            self.direction = cmp(step, 0)

    def new_frame(self, widget):
        # Slider moved: show the selected frame.
        self.gui.set_frame(int(self.frame_number.value))

    def play(self, widget=None):
        # Restart the timer if one is already running.
        if self.id is not None:
            gobject.source_remove(self.id)
        # Frame rate (frames/s) -> timer interval in milliseconds.
        t = int(1000.0 / float(self.time.value))
        self.id = gobject.timeout_add(t, self.step)

    def stop(self, widget=None):
        if self.id is not None:
            gobject.source_remove(self.id)
            self.id = None

    def frame_rate_changed(self,widget=None):
        # Re-arm the timer with the new interval, but only while playing.
        if self.id is not None:
            self.play()

    def step(self):
        """Timer callback: advance by skip+1 frames, rocking or wrapping."""
        i = self.gui.frame
        nimages = self.gui.images.nimages
        delta = int(self.skip.value + 1)
        if self.rock.get_active():
            # Bounce between the two ends of the trajectory.
            if i <= self.skip.value:
                self.direction = 1
            elif i >= nimages - delta:
                self.direction = -1
            i += self.direction * delta
        else:
            # Wrap around at the ends.
            i = (i + self.direction * delta + nimages) % nimages
        self.frame_number.value = i
        # Returning True keeps the gobject timeout alive.
        return True

    def new_time(self, widget):
        if self.id is not None:
            self.play()
if __name__ == '__main__':
    # Manual smoke test: launch the full ase-gui application.
    import os
    os.system('python gui.py')
| grhawk/ASE | tools/ase/gui/movie.py | Python | gpl-2.0 | 4,556 | [
"ASE"
] | f8f88fe0e618251157f5b5bf25acef393a7d76d8b35513fc27645a756c1b7825 |
# -*- coding: utf-8 -*-
"""----------------------------------------------------------------------------
Author:
Huang Quanyong (wo1fSea)
quanyongh@foxmail.com
Date:
2017/6/29
Description:
test_neuron_network.py
----------------------------------------------------------------------------"""
import unittest
from pyenat.neuron_network import NeuronNetwork, Neuron
class TestNeuronNetwork(unittest.TestCase):
    """
    Tests for NeuronNetwork activation, both when wired by hand and when
    generated from a Genome.

    Reference network under test (f = activation function):
        input:  n0 = 1 (bias), n1 = 1, n2 = 2
        output: n3 = f(1. * n5), n4 = f(2. * n5)
        hidden: n5 = f(0.1 * n0 + 1. * n1 + 10. * n2 + 100. * n3 + 1000. * n4)
    The links from the outputs n3/n4 back into n5 are recurrent, so two
    consecutive activations yield different results.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @staticmethod
    def _expected_outputs(activation_function, input_data, n3, n4):
        """Compute one activation step of the reference network by hand.

        :param activation_function: the activation f applied at each node
        :param input_data: values of [n0, n1, n2]
        :param n3, n4: previous output values (recurrent feedback into n5)
        :return: the new (n3, n4) pair
        """
        n5 = activation_function(
            0.1 * input_data[0] + 1. * input_data[1] + 10. * input_data[2] + 100. * n3 + 1000. * n4)
        n3 = activation_function(1. * n5)
        n4 = activation_function(2. * n5)
        return n3, n4

    def test_neuron_network(self):
        """
        Activate a hand-wired NeuronNetwork twice and compare against the
        manually computed reference values (topology in the class docstring).
        """
        neron_network = NeuronNetwork()
        neron_network.input_size = 2
        neron_network.output_size = 2
        neron_network.hidden_size = 1
        # +1 accounts for the bias node n0.
        for i in range(neron_network.input_size + neron_network.output_size + neron_network.hidden_size + 1):
            neron_network.neurons.append(Neuron())

        # Incoming links of the hidden node n5 ...
        neron_network.neurons[5].incoming_link[0] = 0.1
        neron_network.neurons[5].incoming_link[1] = 1.
        neron_network.neurons[5].incoming_link[2] = 10.
        neron_network.neurons[5].incoming_link[3] = 100.
        neron_network.neurons[5].incoming_link[4] = 1000.
        # ... and of the two output nodes n3/n4.
        neron_network.neurons[3].incoming_link[5] = 1.
        neron_network.neurons[4].incoming_link[5] = 2.

        def activation_function(x): return x ** 2

        neron_network.activation_function = activation_function

        # input_data[0] is the always-on bias; activate() takes only the rest.
        input_data = [1, 1, 2]
        n3, n4 = 0, 0
        # Two consecutive activations: the second one differs because of the
        # recurrent feedback of n3/n4 into n5.
        for _ in range(2):
            n3, n4 = self._expected_outputs(activation_function, input_data, n3, n4)
            self.assertSequenceEqual(neron_network.activate(input_data[1:]), [n3, n4])

    def test_neuron_network_with_genome(self):
        """
        Build the same reference network from a Genome and check that the
        generated NeuronNetwork activates identically.
        """
        from pyenat.config import new_config
        from pyenat.environment import Environment
        from pyenat.genome import Genome, Gene

        config = new_config()
        config["input_size"] = 2
        config["output_size"] = 2
        environment = Environment(config)
        genome = Genome(environment)
        genome.hidden_size = 1

        def append_gene(from_node, to_node, weight):
            # Append one enabled gene linking from_node -> to_node.
            gene = Gene()
            gene.from_node = from_node
            gene.to_node = to_node
            gene.weight = weight
            gene.enable = True
            genome.genes.append(gene)

        # Same topology as the hand-wired network in test_neuron_network.
        for from_node, to_node, weight in [
                (5, 3, 1.), (5, 4, 2.),
                (0, 5, 0.1), (1, 5, 1.), (2, 5, 10.), (3, 5, 100.), (4, 5, 1000.)]:
            append_gene(from_node, to_node, weight)

        def activation_function(x): return x ** 2

        neron_network = genome.get_neuron_network()
        neron_network.activation_function = activation_function

        input_data = [1, 1, 2]
        n3, n4 = 0, 0
        for _ in range(2):
            n3, n4 = self._expected_outputs(activation_function, input_data, n3, n4)
            self.assertSequenceEqual(neron_network.activate(input_data[1:]), [n3, n4])
| wo1fsea/PyTWEANN | tests/test_neuron_network.py | Python | mit | 4,968 | [
"NEURON"
] | 2400529a24a15500974fb370bf4329046db83d091f566b691868e61d1493e3eb |
"""
Backend wrapper
"""
__RCSID__ = "$Id$"
class AbstractBackend(object):
  """
  AbstractBackend is used to create an abstraction of handler and formatter concepts from 'logging'.
  It corresponds to the backend concept of the old gLogger.
  It is useful for different reasons:

  - to gather handler and formatter,
    in DIRAC, each handler must be bind with a specific formatter so
    it is more simple and more clear to create an object for this job.

  - each backend can get its options and
    format them to give them to the handler,
    in DIRAC, it is possible to add backend options in the cfgfile.
    For example, for a file, you can give the filename that you want to write log inside.
    The backend allows to each handler to get its own options as parameters. Again, it is more clear
    and more simple to have a specific object for this job.

  In this way, we have an object composed by one handler and one formatter name.
  The purpose of the object is to get cfg options to give them to the handler,
  and to set the format of the handler when the display must be changed.
  """

  def __init__(self, handler, formatter):
    """
    Initialization of the backend.
    handler and formatter can be custom objects. If it is the case, you can find them
    in FrameworkSystem/private/standardLogging/Formatter or Handler.

    :params handler: handler object from 'logging'. Ex: StreamHandler(), FileHandler()...
    :params formatter: a formatter class from logging. Ex: BaseFormatter
    """
    self._handler = handler
    self._formatter = formatter

  def createHandler(self, parameters=None):
    """
    Each backend can initialize its attributes and create its handler with them.

    :params parameters: dictionary of parameters. ex: {'FileName': file.log}
    """
    # bug fix: the message previously read 'setParameter not implemented',
    # which named the wrong method.
    raise NotImplementedError("createHandler not implemented")

  def getHandler(self):
    """
    :return: the handler
    """
    return self._handler

  def setFormat(self, fmt, datefmt, options):
    """
    Each backend give a format to their formatters and attach them to their handlers.

    :params fmt: string representing the log format
    :params datefmt: string representing the date format
    :params options: dictionary of logging options. ex: {'Color': True}
    """
    self._handler.setFormatter(self._formatter(fmt, datefmt, options))

  def setLevel(self, level):
    """
    Configure the level of the handler associated to the backend.

    :params level: integer representing a level
    """
    self._handler.setLevel(level)

  @staticmethod
  def createFormat(options):
    """
    Create a format from the options given in parameters.

    :params options: dictionary of options of the Logging which wants a new format
    :return: tuple containing two strings: a date format and a format
    """
    fmt = ''
    datefmt = '%Y-%m-%d %H:%M:%S'

    if options['headerIsShown']:
      fmt += '%(asctime)s UTC %(componentname)s%(customname)s'
      # the thread ID is only meaningful when the header is displayed
      if options['threadIDIsShown']:
        fmt += ' [%(thread)d]'
      fmt += ' %(levelname)s: '
    fmt += '%(message)s%(spacer)s%(varmessage)s'

    return (datefmt, fmt)
| andresailer/DIRAC | Resources/LogBackends/AbstractBackend.py | Python | gpl-3.0 | 3,329 | [
"DIRAC"
] | e5591abba549177af9e9053153a20fc1ffc55a157eb3370f820b4f710624ddd0 |
"""
Simple ga server for the basicbot.
"""
import argparse
import json
import zmq
import random
import threading
from deap import base
from deap import creator
from deap import tools
############################################################################
############################################################################
############################################################################
# Writing output files, pull out into its own class later?
def writeHeaders(filename, additional_headers=""):
    """ Write out the headers for a logging file.

    Truncates any existing file at `filename`.

    Args:
        filename: Where to write the file and what to call it.
        additional_headers: any additional information to log. Typically a comma-separated string of genes.
    """
    header = "Gen,Ind,Ind_ID,Fit_1"
    if additional_headers:
        header = header + "," + additional_headers
    with open(filename, "w") as out_file:
        out_file.write(header + "\n")
def writeGeneration(filename, generation, individuals):
    """ Write out the fitness information for a generation.

    Appends one CSV row per individual: gen, index, id, fitness, genes...
    """
    with open(filename, "a") as out_file:
        for index, ind in enumerate(individuals):
            # Last field keeps the original trailing-comma behavior when an
            # individual has no genes (the gene join is itself one field).
            fields = [
                str(generation),
                str(index),
                str(ind.id),
                str(ind.fitness),
                ",".join(str(gene) for gene in ind),
            ]
            out_file.write(",".join(fields) + "\n")
############################################################################
############################################################################
############################################################################
class senderThread(threading.Thread):
    """ Thread that serializes a population and pushes it out over a socket. """

    def __init__(self, threadID, socket, population):
        """
        Args:
            threadID: identifier used only in start/exit log messages.
            socket: object with a send(msg) method (a zmq PUSH socket here).
            population: list of individuals (list subclasses with an 'id' attribute).
        """
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.socket = socket
        self.population = population

    def run(self):
        print("\t\t\t\tStarting Sender Thread:"+str(self.threadID))
        self.send_data()
        print("\t\t\t\tExiting Sender Thread:"+str(self.threadID))

    def send_data(self):
        """ Send data to worker processes.

        Each individual is sent as a JSON packet with a placeholder fitness
        of -1.0; workers fill in the real fitness and reply separately.
        """
        for ind in self.population:
            ind_pkt = {'id': ind.id, 'genome': ind, 'fitness': -1.0}
            msg = json.dumps(ind_pkt)
            # Bug fix: the original called the module-level global 'socket'
            # here, ignoring the socket passed to the constructor. Use the
            # instance attribute so the thread works with any socket.
            self.socket.send(msg)
# Process inputs.
# Command-line configuration for the GA run; run_num doubles as the RNG seed
# so runs are reproducible.
parser = argparse.ArgumentParser()
parser.add_argument("--gens", type=int, default=100, help="Number of generations to run evolution for.")
parser.add_argument("--pop_size", type=int, default=100, help="Population size for evolution.")
parser.add_argument("--eval_time", type=float, default=10., help="Simulation time for an individual.")
parser.add_argument("--run_num", type=int, default=0, help="Run Number")
parser.add_argument("--output_path", type=str, default="./", help="Output path")
args = parser.parse_args()
# Initialize the random number seed.
random.seed(args.run_num)
# Setup the socket to send genome data out on.
# PUSH socket: genomes are fanned out round-robin to connected workers.
context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.bind('tcp://127.0.0.1:5000')
# Setup the socket to read the responses on.
# PULL socket: evaluated fitness packets come back from any worker.
receiver = context.socket(zmq.PULL)
receiver.bind('tcp://127.0.0.1:5010')
print("Press Enter when the workers are ready: ")
# NOTE(review): raw_input is Python 2 only; this script will not run under
# Python 3 without changing it to input().
_ = raw_input()
print("Sending tasks to workers")
def generate_id():
    """ Yield a monotonically increasing individual id, starting at 0.

    The original implementation stopped after 10000 ids, which would raise
    StopIteration once exhausted; an unbounded counter removes that latent
    failure while remaining backward compatible (callers only call next()).
    """
    next_id = 0
    while True:
        yield next_id
        next_id += 1
# Shared generator used to hand out a unique id to each new individual.
id_generator = generate_id()
def format_float(value):
    """ Return a formatted float value rounded to four decimal places. """
    return float('%.4f' % value)


def init_gene(val_range=10.0):
    """ Initialize a gene uniformly in [0, val_range), default 0 to 10. """
    rolled = random.random() * val_range
    return format_float(rolled)
def init_individual(create):
    """ Build a new individual: assign a unique id and append its four genes.

    Args:
        create: zero-argument factory producing an empty (list-like) individual.
    Returns:
        the initialized individual.
    """
    ind = create()
    ind.id = next(id_generator)
    # Genes, in the order reported by individual_genes_str(). The tuple is
    # evaluated left to right, so random-number consumption order is preserved.
    genes = (
        init_gene(10.0),       # center_spin_thresh
        9.0 + init_gene(1.0),  # center_drive_thresh
        init_gene(10.0),       # center_stop_thresh
        init_gene(10.0),       # stopping_thresh
    )
    for gene in genes:
        ind.append(gene)
    return ind
def individual_genes_str():
    """ Return the comma-separated gene names, matching init_individual's order. """
    gene_names = (
        "center_spin_thresh",
        "center_drive_thresh",
        "center_stop_thresh",
        "stopping_thresh",
    )
    return ",".join(gene_names)
def mutate_value(value, low_lim, upp_lim):
    """ Mutate a value by a gaussian, clamped to the given bounds.

    Args:
        value: initial value of the parameter.
        low_lim: lower limit of the parameter
        upp_lim: upper limit of the parameter
    Returns:
        the mutated, formatted, clamped value.
    """
    # Standard deviation is 10% of the parameter's full range.
    sigma = (upp_lim - low_lim) * 0.1
    mutated = format_float(random.gauss(value, sigma))
    return min(max(mutated, low_lim), upp_lim)
def mutate(individual, mut_prob=0.04):
    """ Mutate an individual in place.

    Args:
        individual: list of floats to mutate
        mut_prob: mutation probability per element in the genome.
    Returns:
        one-element tuple containing the individual (DEAP convention).
    """
    for index, gene in enumerate(individual):
        if random.random() < mut_prob:
            individual[index] = mutate_value(gene, 0.0, 10.0)
    return (individual,)
def evaluate_population(population, gen):
    """ Evaluate a population and set fitnesses appropriately.

    Sends every individual to the workers on the module-level PUSH socket
    (via a senderThread) and blocks reading fitness packets back from the
    module-level 'receiver' PULL socket until one reply per individual has
    arrived. Replies may come back in any order; they are matched by id.

    Args:
        population: list of individuals
        gen: generation being conducted
    Returns:
        list of population.
    """
    # Start a thread to send the data.
    sendThread = senderThread(gen, socket, population)
    sendThread.start()
    # Read the responses on the receiver socket.
    # NOTE(review): receiver.recv() blocks forever if a worker dies; there is
    # no timeout or retry here.
    i = len(population)
    while i > 0:
        data = json.loads(receiver.recv())
        print(data['fitness'],data['id'])
        # Fitness is stored as the raw scalar from the worker, not a DEAP
        # fitness tuple -- see the NOTE where the population is created.
        population[get_index_of_ind(population,data['id'])].fitness = data['fitness']
        i -= 1
    # Wait for the send thread to complete.
    sendThread.join()
    return population
def get_index_of_ind(population, ind_id):
    """ Get the index of the individual with id `ind_id`, or None if absent. """
    for position, candidate in enumerate(population):
        if candidate.id == ind_id:
            return position
    return None
# Establish name of the output files and write appropriate headers.
out_fit_file = args.output_path+str(args.run_num)+"_fitnesses.dat"
writeHeaders(out_fit_file,additional_headers=individual_genes_str())
# Create an individual.
creator.create("Fitness", base.Fitness, weights=(1.0,)) # Maximize fitness (positive weight). NOTE(review): original comment said "minimize".
creator.create("Individual", list, fitness=creator.Fitness)
# Create the toolbox for setting up DEAP functionality.
toolbox = base.Toolbox()
# Create the toolbox for tracking history.
history = tools.History()
# Define an individual for use in constructing the population.
# Gene generator.
toolbox.register("attr_gene",init_gene)
# Initialize the genome for the individual.
toolbox.register("individual", init_individual, creator.Individual)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mutate", mutate)
toolbox.register("mate", tools.cxTwoPoint)
# Decorate the variation operators
toolbox.decorate("mate", history.decorator)
toolbox.decorate("mutate", history.decorator)
# Create a population as a list.
# NOTE(review): duplicate registration -- "population" was already registered
# above with identical arguments; one of the two lines can be removed.
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# Register the selection function.
toolbox.register("select", tools.selTournament, tournsize=2)
# Crossover and mutation probability
cxpb, mutpb = 0.5, 0.05
# Setup the population.
pop = toolbox.population(n=args.pop_size)
history.update(pop)
# Can conduct evaluations this way.
# NOTE(review): fitness is assigned here as a plain tuple/float, overwriting the
# DEAP Fitness attribute declared on the Individual class; selBest/selTournament
# then compare these raw values instead of DEAP fitness objects.
for p in pop:
    print(p.id,": ",p[0],p[1],p[2],p[3])
    p.fitness = (random.random(),)
    print(p.fitness)
pop = evaluate_population(pop,0)
# Log the progress of the population. (For Generation 0)
writeGeneration(out_fit_file,0,pop)
for g in range(1,args.gens):
    # Pull out the elite individual to save for later.
    # NOTE(review): the elite is not cloned, so if it is also selected as a
    # parent the same object can be mutated in place before being re-added.
    elite = tools.selBest(pop, k=1)
    offspring = toolbox.select(pop, k=len(pop)-1)
    offspring = list(map(toolbox.clone, offspring))
    for child1, child2 in zip(offspring[::2], offspring[1::2]):
        if random.random() < cxpb:
            toolbox.mate(child1, child2)
            # Invalidate fitnesses of modified children (sentinel -1.0).
            child1.fitness = -1.0
            child2.fitness = -1.0
    for mutant in offspring:
        if random.random() < mutpb:
            toolbox.mutate(mutant)
            mutant.fitness = -1.0
    pop[:] = offspring + elite
    # Request new id's for the population.
    # Generation numbering starts at 1, so these ids never collide with the
    # generator-issued ids (0..pop_size-1) of generation 0.
    for i in range(len(pop)):
        pop[i].id = g*args.pop_size + i
    evaluate_population(pop, g)
    print("Generation "+str(g))
    # Log the progress of the population.
    writeGeneration(out_fit_file,g,pop)
    history.update(pop)
print("Closing Socket")
socket.close()
receiver.close()
| jaredmoore/ros_gazebo_python | src/basicbot_ga/test/ga_server.py | Python | gpl-3.0 | 8,606 | [
"Gaussian"
] | 317ba8def43eb2ccc5024f528dce3416e49cf2cb940234b4d80ea2ceb2466678 |
"""
Course Outline page in Studio.
"""
import datetime
from bok_choy.javascript import js_defined, wait_for_js
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from common.test.acceptance.pages.common.utils import click_css, confirm_prompt
from common.test.acceptance.pages.studio.container import ContainerPage
from common.test.acceptance.pages.studio.course_page import CoursePage
from common.test.acceptance.pages.studio.utils import set_input_value, set_input_value_and_save
from common.test.acceptance.tests.helpers import disable_animations, enable_animations, select_option_by_text
@js_defined('jQuery')
class CourseOutlineItem(object):
    """
    A mixin class for any :class:`PageObject` shown in a course outline.
    """
    # Note there are a few pylint disable=no-member occurances in this class, because
    # it was written assuming it is going to be a mixin to a PageObject and will have functions
    # such as self.wait_for_ajax, which doesn't exist on a generic `object`.

    # CSS selectors shared by all outline items; subclasses override BODY_SELECTOR.
    BODY_SELECTOR = None
    EDIT_BUTTON_SELECTOR = '.xblock-field-value-edit'
    NAME_SELECTOR = '.item-title'
    NAME_INPUT_SELECTOR = '.xblock-field-input'
    NAME_FIELD_WRAPPER_SELECTOR = '.xblock-title .wrapper-xblock-field'
    STATUS_MESSAGE_SELECTOR = '> div[class$="status"] .status-message'
    CONFIGURATION_BUTTON_SELECTOR = '.action-item .configure-button'

    def __repr__(self):
        # CourseOutlineItem is also used as a mixin for CourseOutlinePage, which doesn't have a locator
        # Check for the existence of a locator so that errors when navigating to the course outline page don't show up
        # as errors in the repr method instead.
        try:
            return "{}(<browser>, {!r})".format(self.__class__.__name__, self.locator)  # pylint: disable=no-member
        except AttributeError:
            return "{}(<browser>)".format(self.__class__.__name__)

    def _bounded_selector(self, selector):
        """
        Returns `selector`, but limited to this particular `CourseOutlineItem` context
        """
        # If the item doesn't have a body selector or locator, then it can't be bounded
        # This happens in the context of the CourseOutlinePage
        # pylint: disable=no-member
        if self.BODY_SELECTOR and hasattr(self, 'locator'):
            return '{}[data-locator="{}"] {}'.format(
                self.BODY_SELECTOR,
                self.locator,
                selector
            )
        else:
            return selector

    @property
    def name(self):
        """
        Returns the display name of this object, or None if not present.
        """
        name_element = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).first  # pylint: disable=no-member
        if name_element:
            return name_element.text[0]
        else:
            return None

    @property
    def has_status_message(self):
        """
        Returns True if the item has a status message, False otherwise.
        """
        return self.q(css=self._bounded_selector(self.STATUS_MESSAGE_SELECTOR)).first.visible  # pylint: disable=no-member

    @property
    def status_message(self):
        """
        Returns the status message of this item.
        """
        return self.q(css=self._bounded_selector(self.STATUS_MESSAGE_SELECTOR)).text[0]  # pylint: disable=no-member

    @property
    def has_staff_lock_warning(self):
        """ Returns True if the 'Contains staff only content' message is visible """
        return self.status_message == 'Contains staff only content' if self.has_status_message else False

    @property
    def has_restricted_warning(self):
        """ Returns True if the 'Access to this unit is restricted to' message is visible """
        return 'Access to this unit is restricted to' in self.status_message if self.has_status_message else False

    @property
    def is_staff_only(self):
        """ Returns True if the visiblity state of this item is staff only (has a black sidebar) """
        return "is-staff-only" in self.q(css=self._bounded_selector(''))[0].get_attribute("class")  # pylint: disable=no-member

    def edit_name(self):
        """
        Puts the item's name into editable form.
        """
        self.q(css=self._bounded_selector(self.EDIT_BUTTON_SELECTOR)).first.click()  # pylint: disable=no-member

    def enter_name(self, new_name):
        """
        Enters new_name as the item's display name.
        """
        set_input_value(self, self._bounded_selector(self.NAME_INPUT_SELECTOR), new_name)

    def change_name(self, new_name):
        """
        Changes the container's name.
        """
        self.edit_name()
        set_input_value_and_save(self, self._bounded_selector(self.NAME_INPUT_SELECTOR), new_name)
        self.wait_for_ajax()  # pylint: disable=no-member

    def finalize_name(self):
        """
        Presses ENTER, saving the value of the display name for this item.
        """
        # pylint: disable=no-member
        self.q(css=self._bounded_selector(self.NAME_INPUT_SELECTOR)).results[0].send_keys(Keys.ENTER)
        self.wait_for_ajax()

    def set_staff_lock(self, is_locked):
        """
        Sets the explicit staff lock of item on the container page to is_locked.
        """
        modal = self.edit()
        modal.is_explicitly_locked = is_locked
        modal.save()

    def get_enrollment_select_options(self):
        """
        Gets the option names available for unit group access
        """
        modal = self.edit()
        group_options = self.q(css='.group-select-title option').text
        modal.cancel()
        return group_options

    def toggle_unit_access(self, partition_name, group_ids):
        """
        Toggles unit access to the groups in group_ids
        """
        if group_ids:
            modal = self.edit()
            groups_select = self.q(css='.group-select-title select')
            select_option_by_text(groups_select, partition_name)
            for group_id in group_ids:
                checkbox = self.q(css='#content-group-{group_id}'.format(group_id=group_id))
                checkbox.click()
            modal.save()

    def in_editable_form(self):
        """
        Return whether this outline item's display name is in its editable form.
        """
        # pylint: disable=no-member
        return "is-editing" in self.q(
            css=self._bounded_selector(self.NAME_FIELD_WRAPPER_SELECTOR)
        )[0].get_attribute("class")

    def edit(self):
        """
        Puts the item into editable form and returns the opened modal.
        """
        self.q(css=self._bounded_selector(self.CONFIGURATION_BUTTON_SELECTOR)).first.click()  # pylint: disable=no-member
        if 'subsection' in self.BODY_SELECTOR:
            modal = SubsectionOutlineModal(self)
        else:
            modal = CourseOutlineModal(self)
        # Bug fix: bok_choy promises are lazy -- without .fulfill() the wait
        # never executed and the modal could be used before it was shown.
        EmptyPromise(lambda: modal.is_shown(), 'Modal is shown.').fulfill()  # pylint: disable=unnecessary-lambda
        return modal

    @property
    def release_date(self):
        """
        Returns the release date from the page. Date is "mm/dd/yyyy" string.
        """
        element = self.q(css=self._bounded_selector(".status-release-value"))  # pylint: disable=no-member
        return element.first.text[0] if element.present else None

    @property
    def due_date(self):
        """
        Returns the due date from the page. Date is "mm/dd/yyyy" string.
        """
        element = self.q(css=self._bounded_selector(".status-grading-date"))  # pylint: disable=no-member
        return element.first.text[0] if element.present else None

    @property
    def policy(self):
        """
        Select the grading format with `value` in the drop-down list.
        """
        element = self.q(css=self._bounded_selector(".status-grading-value"))  # pylint: disable=no-member
        return element.first.text[0] if element.present else None

    @wait_for_js
    def publish(self):
        """
        Publish the unit via its publish modal.
        """
        click_css(self, self._bounded_selector('.action-publish'), require_notification=False)
        modal = CourseOutlineModal(self)
        # Bug fix: same lazy-promise issue as in edit() -- fulfill the wait.
        EmptyPromise(lambda: modal.is_shown(), 'Modal is shown.').fulfill()  # pylint: disable=unnecessary-lambda
        modal.publish()

    @property
    def publish_action(self):
        """
        Returns the link for publishing a unit.
        """
        return self.q(css=self._bounded_selector('.action-publish')).first  # pylint: disable=no-member
class CourseOutlineContainer(CourseOutlineItem):
    """
    A mixin to a CourseOutline page object that adds the ability to load
    a child page object by title or by index.
    CHILD_CLASS must be a :class:`CourseOutlineChild` subclass.
    """
    # Subclasses must set CHILD_CLASS to the page-object class of their children.
    CHILD_CLASS = None
    ADD_BUTTON_SELECTOR = '> .outline-content > .add-item a.button-new'
    def child(self, title, child_class=None):
        """
        Return the first child page object whose displayed name matches `title`.
        :type self: object
        """
        if not child_class:
            child_class = self.CHILD_CLASS
        # pylint: disable=no-member
        return child_class(
            self.browser,
            self.q(css=child_class.BODY_SELECTOR).filter(
                lambda el: title in [inner.text for inner in
                                     el.find_elements_by_css_selector(child_class.NAME_SELECTOR)]
            ).attrs('data-locator')[0]
        )
    def children(self, child_class=None):
        """
        Returns all the children page objects of class child_class.
        """
        if not child_class:
            child_class = self.CHILD_CLASS
        # pylint: disable=no-member
        return self.q(css=self._bounded_selector(child_class.BODY_SELECTOR)).map(
            lambda el: child_class(self.browser, el.get_attribute('data-locator'))).results
    def child_at(self, index, child_class=None):
        """
        Returns the child at the specified index.
        :type self: object
        """
        if not child_class:
            child_class = self.CHILD_CLASS
        return self.children(child_class)[index]
    def add_child(self, require_notification=True):
        """
        Adds a child to this xblock, waiting for notifications.
        """
        click_css(
            self,
            self._bounded_selector(self.ADD_BUTTON_SELECTOR),
            require_notification=require_notification,
        )
    def expand_subsection(self):
        """
        Toggle the expansion of this subsection.
        """
        # Animations are disabled so the expand state settles immediately.
        disable_animations(self)
        def subsection_expanded():
            """
            Returns whether or not this subsection is expanded.
            """
            self.wait_for_element_presence(
                self._bounded_selector(self.ADD_BUTTON_SELECTOR), 'Toggle control is present'
            )
            css_element = self._bounded_selector(self.ADD_BUTTON_SELECTOR)
            # The add button is only displayed while the subsection is expanded.
            add_button = self.q(css=css_element).first.results  # pylint: disable=no-member
            self.scroll_to_element(css_element)  # pylint: disable=no-member
            return add_button and add_button[0].is_displayed()
        currently_expanded = subsection_expanded()
        # Need to click slightly off-center in order for the click to be recognized.
        css_element = self._bounded_selector('.ui-toggle-expansion .fa')
        self.scroll_to_element(css_element)  # pylint: disable=no-member
        ele = self.browser.find_element_by_css_selector(css_element)  # pylint: disable=no-member
        ActionChains(self.browser).move_to_element_with_offset(ele, 8, 8).click().perform()  # pylint: disable=no-member
        self.wait_for_element_presence(self._bounded_selector(self.ADD_BUTTON_SELECTOR), 'Subsection is expanded')
        # Block until the expansion state has actually flipped.
        EmptyPromise(
            lambda: subsection_expanded() != currently_expanded,
            "Check that the container {} has been toggled".format(self.locator)
        ).fulfill()
        enable_animations(self)
        return self
    @property
    def is_collapsed(self):
        """
        Return whether this outline item is currently collapsed.
        """
        css_element = self._bounded_selector('')
        self.scroll_to_element(css_element)  # pylint: disable=no-member
        return "is-collapsed" in self.q(css=css_element).first.attrs("class")[0]  # pylint: disable=no-member
class CourseOutlineChild(PageObject, CourseOutlineItem):
    """
    A page object that will be used as a child of :class:`CourseOutlineContainer`.
    """
    url = None
    BODY_SELECTOR = '.outline-item'
    def __init__(self, browser, locator):
        """
        :params browser: selenium webdriver instance.
        :params locator: the xblock usage locator identifying this outline item.
        """
        super(CourseOutlineChild, self).__init__(browser)
        self.locator = locator
    def is_browser_on_page(self):
        """
        True when an outline item with this locator is present in the DOM.
        """
        return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
    def delete(self, cancel=False):
        """
        Clicks the delete button, then cancels at the confirmation prompt if cancel is True.
        """
        click_css(self, self._bounded_selector('.delete-button'), require_notification=False)
        confirm_prompt(self, cancel)
    def _bounded_selector(self, selector):
        """
        Return `selector`, but limited to this particular `CourseOutlineChild` context
        """
        return '{}[data-locator="{}"] {}'.format(
            self.BODY_SELECTOR,
            self.locator,
            selector
        )
    @property
    def name(self):
        """
        Display name of this item, or None when no title element is present.
        """
        titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
        if titles:
            return titles[0]
        else:
            return None
    @property
    def children(self):
        """
        Will return any first-generation descendant items of this item.
        """
        descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(
            lambda el: CourseOutlineChild(self.browser, el.get_attribute('data-locator'))).results
        # Now remove any non-direct descendants.
        grandkids = []
        for descendant in descendants:
            grandkids.extend(descendant.children)
        grand_locators = [grandkid.locator for grandkid in grandkids]
        return [descendant for descendant in descendants if descendant.locator not in grand_locators]
class CourseOutlineUnit(CourseOutlineChild):
    """
    PageObject that wraps a unit link on the Studio Course Outline page.
    """
    url = None

    BODY_SELECTOR = '.outline-unit'
    NAME_SELECTOR = '.unit-title a'

    def go_to(self):
        """
        Open the container page linked to by this unit link, and return
        an initialized :class:`.ContainerPage` for that unit.
        """
        container_page = ContainerPage(self.browser, self.locator)
        return container_page.visit()

    def is_browser_on_page(self):
        """
        True when at least one outline-unit element is present.
        """
        unit_query = self.q(css=self.BODY_SELECTOR)
        return unit_query.present

    def children(self):
        """
        Return the child units nested inside this unit, as page objects.
        """
        bounded_css = self._bounded_selector(self.BODY_SELECTOR)

        def build_unit(el):
            """Wrap a DOM element in a CourseOutlineUnit page object."""
            return CourseOutlineUnit(self.browser, el.get_attribute('data-locator'))

        return self.q(css=bounded_css).map(build_unit).results
class CourseOutlineSubsection(CourseOutlineContainer, CourseOutlineChild):
    """
    :class`.PageObject` that wraps a subsection block on the Studio Course Outline page.
    """
    url = None

    BODY_SELECTOR = '.outline-subsection'
    NAME_SELECTOR = '.subsection-title'
    NAME_FIELD_WRAPPER_SELECTOR = '.subsection-header .wrapper-xblock-field'
    CHILD_CLASS = CourseOutlineUnit

    def unit(self, title):
        """
        Look up the child :class:`.CourseOutlineUnit` whose displayed title is `title`.
        """
        return self.child(title)

    def units(self):
        """
        List every :class:`.CourseOutlineUnit` contained in this subsection.
        """
        return self.children()

    def unit_at(self, index):
        """
        Fetch the :class:`.CourseOutlineUnit` at position `index`.
        """
        return self.child_at(index)

    def add_unit(self):
        """
        Click this subsection's 'add unit' control (does not wait for a notification).
        """
        self.q(css=self._bounded_selector(self.ADD_BUTTON_SELECTOR)).click()
class CourseOutlineSection(CourseOutlineContainer, CourseOutlineChild):
    """
    :class`.PageObject` that wraps a section block on the Studio Course Outline page.
    """
    url = None

    BODY_SELECTOR = '.outline-section'
    NAME_SELECTOR = '.section-title'
    NAME_FIELD_WRAPPER_SELECTOR = '.section-header .wrapper-xblock-field'
    CHILD_CLASS = CourseOutlineSubsection

    def subsection(self, title):
        """
        Look up the child :class:`.CourseOutlineSubsection` titled `title`.
        """
        return self.child(title)

    def subsections(self):
        """
        List every :class:`.CourseOutlineSubsection` contained in this section.
        """
        return self.children()

    def subsection_at(self, index):
        """
        Fetch the :class:`.CourseOutlineSubsection` at position `index`.
        """
        return self.child_at(index)

    def add_subsection(self):
        """
        Create a new subsection in this section (waits for the notification).
        """
        self.add_child()
class ExpandCollapseLinkState(object):
    """
    Represents the three states that the expand/collapse link can be in
    """
    MISSING = 0   # the link is not displayed at all
    COLLAPSE = 1  # the link offers to collapse all sections
    EXPAND = 2    # the link offers to expand all sections
class CourseOutlinePage(CoursePage, CourseOutlineContainer):
    """
    Course Outline page in Studio.
    """
    url_path = "course"
    CHILD_CLASS = CourseOutlineSection
    EXPAND_COLLAPSE_CSS = '.button-toggle-expand-collapse'
    BOTTOM_ADD_SECTION_BUTTON = '.outline > .add-section .button-new'

    def is_browser_on_page(self):
        """
        True once the outline view, its primary content, and the (hidden)
        loading indicator are all present.
        """
        return all([
            self.q(css='body.view-outline').present,
            self.q(css='.content-primary').present,
            self.q(css='div.ui-loading.is-hidden').present
        ])

    def view_live(self):
        """
        Clicks the "View Live" link and switches to the new tab
        """
        click_css(self, '.view-live-button', require_notification=False)
        self.wait_for_page()
        self.browser.switch_to_window(self.browser.window_handles[-1])

    def section(self, title):
        """
        Return the :class:`.CourseOutlineSection` with the title `title`.
        """
        return self.child(title)

    def section_at(self, index):
        """
        Returns the :class:`.CourseOutlineSection` at the specified index.
        """
        return self.child_at(index)

    def click_section_name(self, parent_css=''):
        """
        Find and click on first section name in course outline
        """
        self.q(css='{} .section-name'.format(parent_css)).first.click()

    def get_section_name(self, parent_css='', page_refresh=False):
        """
        Get the list of names of all sections present
        """
        if page_refresh:
            self.browser.refresh()
        return self.q(css='{} .section-name'.format(parent_css)).text

    def section_name_edit_form_present(self, parent_css=''):
        """
        Check that section name edit form present
        """
        return self.q(css='{} .section-name input'.format(parent_css)).present

    def change_section_name(self, new_name, parent_css=''):
        """
        Change section name of first section present in course outline
        """
        self.click_section_name(parent_css)
        self.q(css='{} .section-name input'.format(parent_css)).first.fill(new_name)
        self.q(css='{} .section-name .save-button'.format(parent_css)).first.click()
        self.wait_for_ajax()

    def sections(self):
        """
        Returns the sections of this course outline page.
        """
        return self.children()

    def add_section_from_top_button(self):
        """
        Clicks the button for adding a section which resides at the top of the screen.
        """
        click_css(self, '.wrapper-mast nav.nav-actions .button-new')

    def add_section_from_bottom_button(self, click_child_icon=False):
        """
        Clicks the button for adding a section which resides at the bottom of the screen.
        """
        element_css = self.BOTTOM_ADD_SECTION_BUTTON
        if click_child_icon:
            element_css += " .fa-plus"
        click_css(self, element_css)

    def toggle_expand_collapse(self):
        """
        Toggles whether all sections are expanded or collapsed
        """
        self.q(css=self.EXPAND_COLLAPSE_CSS).click()

    def start_reindex(self):
        """
        Starts course reindex by clicking reindex button
        """
        self.reindex_button.click()

    def open_subsection_settings_dialog(self, index=0):
        """
        clicks on the settings button of subsection.
        """
        self.q(css=".subsection-header-actions .configure-button").nth(index).click()
        self.wait_for_element_presence('.course-outline-modal', 'Subsection settings modal is present.')

    def change_problem_release_date(self, date="01/01/2030"):
        """
        Sets a new start date for the first subsection.

        Arguments:
            date: start date string accepted by the input, e.g. '01/01/2030'.
                Defaults to the previously hard-coded value so existing
                callers keep their behavior.
        """
        self.q(css=".subsection-header-actions .configure-button").first.click()
        self.q(css="#start_date").fill(date)
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()

    def change_problem_due_date(self, date):
        """
        Sets a new due date.

        Expects date to be a string that will be accepted by the input (for example, '01/01/1970')
        """
        self.q(css=".subsection-header-actions .configure-button").first.click()
        self.q(css="#due_date").fill(date)
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()

    def select_visibility_tab(self):
        """
        Select the visibility settings tab of the subsection modal.
        """
        self.q(css=".settings-tab-button[data-tab='visibility']").first.click()
        self.wait_for_element_presence('input[value=hide_after_due]', 'Visibility fields not present.')

    def select_advanced_tab(self, desired_item='special_exam'):
        """
        Select the advanced settings tab and wait for the requested item.
        """
        self.q(css=".settings-tab-button[data-tab='advanced']").first.click()
        if desired_item == 'special_exam':
            self.wait_for_element_presence('input.no_special_exam', 'Special exam settings fields not present.')
        if desired_item == 'gated_content':
            self.wait_for_element_visibility('#is_prereq', 'Gating settings fields are present.')

    def make_exam_proctored(self):
        """
        Makes a Proctored exam.
        """
        self.q(css="input.proctored_exam").first.click()
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()

    def make_exam_timed(self, hide_after_due=False):
        """
        Makes a timed exam, optionally hiding content after the due date.
        """
        self.q(css="input.timed_exam").first.click()
        if hide_after_due:
            self.select_visibility_tab()
            self.q(css='input[name=content-visibility][value=hide_after_due]').first.click()
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()

    def make_subsection_hidden_after_due_date(self):
        """
        Sets a subsection to be hidden after due date.
        """
        self.q(css='input[value=hide_after_due]').first.click()
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()

    def select_none_exam(self):
        """
        Choose "none" exam but do not press enter
        """
        self.q(css="input.no_special_exam").first.click()

    def select_timed_exam(self):
        """
        Choose a timed exam but do not press enter
        """
        self.q(css="input.timed_exam").first.click()

    def select_proctored_exam(self):
        """
        Choose a proctored exam but do not press enter
        """
        self.q(css="input.proctored_exam").first.click()

    def select_practice_exam(self):
        """
        Choose a practice exam but do not press enter
        """
        self.q(css="input.practice_exam").first.click()

    def time_allotted_field_visible(self):
        """
        returns whether the time allotted field is visible
        """
        return self.q(css=".field-time-limit").visible

    def exam_review_rules_field_visible(self):
        """
        Returns whether the review rules field is visible
        """
        return self.q(css=".field-exam-review-rules").visible

    def proctoring_items_are_displayed(self):
        """
        Returns True if all four exam-type radio buttons are present.
        """
        # None / Timed / Proctored / Practice radio buttons.
        radio_selectors = (
            "input.no_special_exam",
            "input.timed_exam",
            "input.proctored_exam",
            "input.practice_exam",
        )
        return all(self.q(css=selector).present for selector in radio_selectors)

    def make_gating_prerequisite(self):
        """
        Makes a subsection a gating prerequisite.
        """
        if not self.q(css="#is_prereq")[0].is_selected():
            self.q(css='label[for="is_prereq"]').click()
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()

    def add_prerequisite_to_subsection(self, min_score, min_completion):
        """
        Adds a prerequisite to a subsection.
        """
        Select(self.q(css="#prereq")[0]).select_by_index(1)
        self.q(css="#prereq_min_score").fill(min_score)
        self.q(css="#prereq_min_completion").fill(min_completion)
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()

    def gating_prerequisite_checkbox_is_visible(self):
        """
        Returns True if the gating prerequisite checkbox is visible.
        """
        return self.q(css="#is_prereq").visible

    def gating_prerequisite_checkbox_is_checked(self):
        """
        Returns True if the gating prerequisite checkbox is checked.
        """
        return self.q(css="#is_prereq:checked").present

    def gating_prerequisites_dropdown_is_visible(self):
        """
        Returns True if the gating prerequisites dropdown is visible.
        """
        return self.q(css="#prereq").visible

    def gating_prerequisite_min_score_is_visible(self):
        """
        Returns True if the gating prerequisite minimum score input is visible.
        """
        return self.q(css="#prereq_min_score").visible

    @property
    def bottom_add_section_button(self):
        """
        Returns the query representing the bottom add section button.
        """
        return self.q(css=self.BOTTOM_ADD_SECTION_BUTTON).first

    @property
    def has_no_content_message(self):
        """
        Returns true if a message informing the user that the course has no content is visible
        """
        return self.q(css='.outline .no-content').is_present()

    @property
    def has_rerun_notification(self):
        """
        Returns true iff the rerun notification is present on the page.
        """
        return self.q(css='.wrapper-alert.is-shown').is_present()

    def dismiss_rerun_notification(self):
        """
        Clicks the dismiss button in the rerun notification.
        """
        self.q(css='.dismiss-button').click()

    @property
    def expand_collapse_link_state(self):
        """
        Returns the current state of the expand/collapse link
        """
        link = self.q(css=self.EXPAND_COLLAPSE_CSS)[0]
        if not link.is_displayed():
            return ExpandCollapseLinkState.MISSING
        elif "collapse-all" in link.get_attribute("class"):
            return ExpandCollapseLinkState.COLLAPSE
        else:
            return ExpandCollapseLinkState.EXPAND

    @property
    def reindex_button(self):
        """
        Returns reindex button.
        """
        return self.q(css=".button.button-reindex")[0]

    def expand_all_subsections(self):
        """
        Expands all the subsections in this course.
        """
        for section in self.sections():
            if section.is_collapsed:
                section.expand_subsection()
            for subsection in section.subsections():
                if subsection.is_collapsed:
                    subsection.expand_subsection()

    @property
    def xblocks(self):
        """
        Return a list of xblocks loaded on the outline page.
        """
        return self.children(CourseOutlineChild)

    @property
    def license(self):
        """
        Returns the course license text, if present. Else returns None.
        """
        return self.q(css=".license-value").first.text[0]

    @property
    def deprecated_warning_visible(self):
        """
        Returns true if the deprecated warning is visible.
        """
        return self.q(css='.wrapper-alert-error.is-shown').is_present()

    @property
    def warning_heading_text(self):
        """
        Returns deprecated warning heading text.
        """
        return self.q(css='.warning-heading-text').text[0]

    @property
    def components_list_heading(self):
        """
        Returns deprecated warning component list heading text.
        """
        return self.q(css='.components-list-heading-text').text[0]

    @property
    def modules_remove_text_shown(self):
        """
        Returns True if deprecated warning advance modules remove text is visible.
        """
        return self.q(css='.advance-modules-remove-text').visible

    @property
    def modules_remove_text(self):
        """
        Returns deprecated warning advance modules remove text.
        """
        return self.q(css='.advance-modules-remove-text').text[0]

    @property
    def components_visible(self):
        """
        Returns True if components list visible.
        """
        return self.q(css='.components-list').visible

    @property
    def components_display_names(self):
        """
        Returns deprecated warning components display name list.
        """
        return self.q(css='.components-list li>a').text

    @property
    def deprecated_advance_modules(self):
        """
        Returns deprecated advance modules list.
        """
        return self.q(css='.advance-modules-list li').text
class CourseOutlineModal(object):
    """
    Page object specifically for a modal window on the course outline page.
    Subsections are handled slightly differently in some regards, and should use SubsectionOutlineModal.
    """
    MODAL_SELECTOR = ".wrapper-modal-window"
    def __init__(self, page):
        # `page` is the enclosing page object; all queries are delegated to it.
        self.page = page
    def _bounded_selector(self, selector):
        """
        Returns `selector`, but limited to this particular `CourseOutlineModal` context.
        """
        return " ".join([self.MODAL_SELECTOR, selector])
    def is_shown(self):
        """
        Return whether or not the modal defined by self.MODAL_SELECTOR is shown.
        """
        return self.page.q(css=self.MODAL_SELECTOR).present
    def find_css(self, selector):
        """
        Find the given css selector on the page.
        """
        return self.page.q(css=self._bounded_selector(selector))
    def click(self, selector, index=0):
        """
        Perform a Click action on the given selector.
        """
        self.find_css(selector).nth(index).click()
    def save(self):
        """
        Click the save action button, and wait for the ajax call to return.
        """
        self.click(".action-save")
        self.page.wait_for_ajax()
    def publish(self):
        """
        Click the publish action button, and wait for the ajax call to return.
        """
        self.click(".action-publish")
        self.page.wait_for_ajax()
    def cancel(self):
        """
        Click the cancel action button.
        """
        self.click(".action-cancel")
    def has_release_date(self):
        """
        Check if the input box for the release date exists in the subsection's settings window
        """
        return self.find_css("#start_date").present
    def has_release_time(self):
        """
        Check if the input box for the release time exists in the subsection's settings window
        """
        return self.find_css("#start_time").present
    def has_due_date(self):
        """
        Check if the input box for the due date exists in the subsection's settings window
        """
        return self.find_css("#due_date").present
    def has_due_time(self):
        """
        Check if the input box for the due time exists in the subsection's settings window
        """
        return self.find_css("#due_time").present
    def has_policy(self):
        """
        Check if the input for the grading policy is present.
        """
        return self.find_css("#grading_type").present
    def set_date(self, property_name, input_selector, date):
        """
        Set `date` value to input pointed by `selector` and `property_name`.

        `date` is an "mm/dd/yyyy" string; the value is set by driving the
        jQuery UI datepicker widget rather than typing into the input.
        """
        month, day, year = map(int, date.split('/'))
        self.click(input_selector)
        if getattr(self, property_name):
            # NOTE(review): for an "mm/dd/yyyy" value, split('/')[1:] yields
            # (day, year), not (month, year) — possibly intended [::2]; confirm.
            current_month, current_year = map(int, getattr(self, property_name).split('/')[1:])
        else: # Use default timepicker values, which are current month and year.
            current_month, current_year = datetime.datetime.today().month, datetime.datetime.today().year
        # Number of month steps the datepicker must be paged forward/backward.
        date_diff = 12 * (year - current_year) + month - current_month
        selector = "a.ui-datepicker-{}".format('next' if date_diff > 0 else 'prev')
        for __ in xrange(abs(date_diff)):  # xrange: this module targets Python 2
            self.page.q(css=selector).click()
        self.page.q(css="a.ui-state-default").nth(day - 1).click() # set day
        self.page.wait_for_element_invisibility("#ui-datepicker-div", "datepicker should be closed")
        EmptyPromise(
            lambda: getattr(self, property_name) == u'{m}/{d}/{y}'.format(m=month, d=day, y=year),
            "{} is updated in modal.".format(property_name)
        ).fulfill()
    def set_time(self, input_selector, time):
        """
        Set `time` value to input pointed by `input_selector`
        Not using the time picker to make sure it's not being rounded up
        """
        self.page.q(css=input_selector).fill(time)
        self.page.q(css=input_selector).results[0].send_keys(Keys.ENTER)
    @property
    def release_date(self):
        """
        Returns the unit's release date. Date is "mm/dd/yyyy" string.
        """
        return self.find_css("#start_date").first.attrs('value')[0]
    @release_date.setter
    def release_date(self, date):
        """
        Sets the unit's release date to `date`. Date is "mm/dd/yyyy" string.
        """
        self.set_date('release_date', "#start_date", date)
    @property
    def release_time(self):
        """
        Returns the current value of the release time. Default is u'00:00'
        """
        return self.find_css("#start_time").first.attrs('value')[0]
    @release_time.setter
    def release_time(self, time):
        """
        Time is "HH:MM" string.
        """
        self.set_time("#start_time", time)
    @property
    def due_date(self):
        """
        Returns the due date from the page. Date is "mm/dd/yyyy" string.
        """
        return self.find_css("#due_date").first.attrs('value')[0]
    @due_date.setter
    def due_date(self, date):
        """
        Sets the due date for the unit. Date is "mm/dd/yyyy" string.
        """
        self.set_date('due_date', "#due_date", date)
    @property
    def due_time(self):
        """
        Returns the current value of the due time. Default is u''
        """
        return self.find_css("#due_time").first.attrs('value')[0]
    @due_time.setter
    def due_time(self, time):
        """
        Time is "HH:MM" string.
        """
        self.set_time("#due_time", time)
    @property
    def policy(self):
        """
        Returns the text of the grading format currently selected in the drop-down list.
        """
        element = self.find_css('#grading_type')[0]
        return self.get_selected_option_text(element)
    @policy.setter
    def policy(self, grading_label):
        """
        Select the grading format with `value` in the drop-down list.
        """
        element = self.find_css('#grading_type')[0]
        select = Select(element)
        select.select_by_visible_text(grading_label)
        EmptyPromise(
            lambda: self.policy == grading_label,
            "Grading label is updated.",
        ).fulfill()
    @property
    def is_staff_lock_visible(self):
        """
        Returns True if the staff lock option is visible.
        """
        return self.find_css('#staff_lock').visible
    def ensure_staff_lock_visible(self):
        """
        Ensures the staff lock option is visible, clicking on the advanced tab
        if needed.
        """
        if not self.is_staff_lock_visible:
            self.find_css(".settings-tab-button[data-tab=visibility]").click()
        EmptyPromise(
            lambda: self.is_staff_lock_visible,
            "Staff lock option is visible",
        ).fulfill()
    @property
    def is_explicitly_locked(self):
        """
        Returns true if the explict staff lock checkbox is checked, false otherwise.
        """
        self.ensure_staff_lock_visible()
        return self.find_css('#staff_lock')[0].is_selected()
    @is_explicitly_locked.setter
    def is_explicitly_locked(self, value):
        """
        Checks the explicit staff lock box if value is true, otherwise selects "visible".
        """
        self.ensure_staff_lock_visible()
        # Only toggle when the desired state differs from the current one.
        if value != self.is_explicitly_locked:
            self.find_css('#staff_lock').click()
        EmptyPromise(lambda: value == self.is_explicitly_locked, "Explicit staff lock is updated").fulfill()
    def shows_staff_lock_warning(self):
        """
        Returns true iff the staff lock warning is visible.
        """
        return self.find_css('.staff-lock .tip-warning').visible
    def get_selected_option_text(self, element):
        """
        Returns the text of the first selected option for the element.
        """
        if element:
            select = Select(element)
            return select.first_selected_option.text
        else:
            return None
class SubsectionOutlineModal(CourseOutlineModal):
    """
    Subclass to handle a few special cases with subsection modals.

    Subsections use a radio-button group (input[name=content-visibility])
    instead of the single #staff_lock checkbox of the base class.
    """
    @property
    def is_explicitly_locked(self):
        """
        Override - returns True if staff_only is set.
        """
        return self.subsection_visibility == 'staff_only'
    @property
    def subsection_visibility(self):
        """
        Returns the current visibility setting for a subsection
        """
        self.ensure_staff_lock_visible()
        # Value of whichever visibility radio button is currently checked.
        return self.find_css('input[name=content-visibility]:checked').first.attrs('value')[0]
    @is_explicitly_locked.setter
    def is_explicitly_locked(self, value): # pylint: disable=arguments-differ
        """
        Override - sets visibility to staff_only if True, else 'visible'.
        For hide_after_due, use the set_subsection_visibility method directly.
        """
        self.subsection_visibility = 'staff_only' if value else 'visible'
    @subsection_visibility.setter
    def subsection_visibility(self, value):
        """
        Sets the subsection visibility to the given value.
        """
        self.ensure_staff_lock_visible()
        self.find_css('input[name=content-visibility][value=' + value + ']').click()
        EmptyPromise(lambda: value == self.subsection_visibility, "Subsection visibility is updated").fulfill()
    @property
    def is_staff_lock_visible(self):
        """
        Override - Returns true if the staff lock option is visible.
        """
        return self.find_css('input[name=content-visibility]').visible
| Stanford-Online/edx-platform | common/test/acceptance/pages/studio/overview.py | Python | agpl-3.0 | 40,144 | [
"VisIt"
] | 82f95bc286eeacf76d8acd57fa35882472ec05a91709aec0445bca084f99a214 |
import json
from .visitor import Visitor, visit
__all__ = ['print_ast']
def print_ast(ast):
    """Serialize a GraphQL AST back into query-language source text."""
    printer = PrintingVisitor()
    return visit(ast, printer)
class PrintingVisitor(Visitor):
    """Visitor that serializes a GraphQL AST back to query-language text.

    Each ``leave_*`` hook returns the printed string for one node type; the
    hooks treat child attributes (e.g. ``node.definitions``) as strings that
    were already produced by the children's own hooks.
    """
    def leave_Name(self, node, *args):
        return node.value
    def leave_Variable(self, node, *args):
        return '$' + node.name
    def leave_Document(self, node, *args):
        return join(node.definitions, '\n\n') + '\n'
    def leave_OperationDefinition(self, node, *args):
        name = node.name
        selection_set = node.selection_set
        # Anonymous operation shorthand: print just the selection set.
        if not name:
            return selection_set
        op = node.operation
        defs = wrap('(', join(node.variable_definitions, ', '), ')')
        directives = join(node.directives, ' ')
        return join([op, join([name, defs]), directives, selection_set], ' ')
    def leave_VariableDefinition(self, node, *args):
        return node.variable + ': ' + node.type + wrap(' = ', node.default_value)
    def leave_SelectionSet(self, node, *args):
        return block(node.selections)
    def leave_Field(self, node, *args):
        return join([
            wrap('', node.alias, ': ') + node.name + wrap('(', join(node.arguments, ', '), ')'),
            join(node.directives, ' '),
            node.selection_set
        ], ' ')
    def leave_Argument(self, node, *args):
        return node.name + ': ' + node.value
    # Fragments
    def leave_FragmentSpread(self, node, *args):
        return '...' + node.name + wrap(' ', join(node.directives, ' '))
    def leave_InlineFragment(self, node, *args):
        return ('... on ' + node.type_condition + ' ' +
                wrap('', join(node.directives, ' '), ' ') +
                node.selection_set)
    def leave_FragmentDefinition(self, node, *args):
        return ('fragment {} on {} '.format(node.name, node.type_condition) +
                wrap('', join(node.directives, ' '), ' ') +
                node.selection_set)
    # Value
    def leave_IntValue(self, node, *args):
        return node.value
    def leave_FloatValue(self, node, *args):
        return node.value
    def leave_StringValue(self, node, *args):
        # json.dumps produces the required quoting/escaping for string literals
        return json.dumps(node.value)
    def leave_BooleanValue(self, node, *args):
        # json.dumps prints lowercase true/false as GraphQL requires
        return json.dumps(node.value)
    def leave_EnumValue(self, node, *args):
        return node.value
    def leave_ListValue(self, node, *args):
        return '[' + join(node.values, ', ') + ']'
    def leave_ObjectValue(self, node, *args):
        return '{' + join(node.fields, ', ') + '}'
    def leave_ObjectField(self, node, *args):
        return node.name + ': ' + node.value
    # Directive
    def leave_Directive(self, node, *args):
        return '@' + node.name + wrap('(', join(node.arguments, ', '), ')')
    # Type
    def leave_NamedType(self, node, *args):
        return node.name
    def leave_ListType(self, node, *args):
        return '[' + node.type + ']'
    def leave_NonNullType(self, node, *args):
        return node.type + '!'
def join(maybe_list, separator=''):
    """Concatenate the truthy entries of *maybe_list* with *separator*.

    A falsy *maybe_list* (None or empty) yields the empty string.
    """
    if not maybe_list:
        return ''
    pieces = [piece for piece in maybe_list if piece]
    return separator.join(pieces)
def block(maybe_list):
    """Render *maybe_list* as an indented ``{ ... }`` block, or '' when empty."""
    if not maybe_list:
        return ''
    return indent('{\n' + join(maybe_list, '\n')) + '\n}'
def wrap(start, maybe_str, end=''):
    """Surround a truthy *maybe_str* with *start* and *end*; else return ''."""
    return (start + maybe_str + end) if maybe_str else ''
def indent(maybe_str):
    """Indent every line after the first; falsy values pass through unchanged."""
    if not maybe_str:
        return maybe_str
    return maybe_str.replace('\n', '\n ')
| gabriel-laet/graphql-py | graphql/core/language/printer.py | Python | mit | 3,464 | [
"VisIt"
] | 1c1b274cf409f4172710afa218e9e07690e1ee4b13cc1b113b611c746e467b6a |
from setuptools import setup
from ircu.consts import version_str
# Distribution metadata; the version string is maintained in ircu.consts so it
# is defined in exactly one place.
setup(
    name='ircu',
    version=version_str,
    author='Brian Cline',
    author_email='brian.cline@gmail.com',
    description=('An IRC network state machine for Undernet ircu-based '
                 'services using the P10 protocol.'),
    # NOTE(review): the bare open() calls below leave the file handles to the
    # GC and keep trailing newlines in install_requires entries; setuptools
    # tolerates both, but `with` blocks plus .strip() would be tidier.
    long_description=open('README.rst').read(),
    license='MIT',
    keywords='irc ircu service p10',
    url='https://github.com/briancline/ircu-python',
    packages=['ircu'],
    install_requires=open('requirements.txt').readlines(),
    test_suite='nose.collector',
    zip_safe=False,
    classifiers=[
        'Development Status :: 1 - Planning',
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Operating System :: POSIX :: AIX',
        'Operating System :: POSIX :: HP-UX',
        'Operating System :: POSIX :: IRIX',
        'Operating System :: POSIX :: SunOS/Solaris',
        'Operating System :: POSIX :: BSD :: BSD/OS',
        'Operating System :: POSIX :: BSD :: FreeBSD',
        'Operating System :: POSIX :: BSD :: NetBSD',
        'Operating System :: POSIX :: BSD :: OpenBSD',
        'Operating System :: Unix',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Communications :: Chat :: Internet Relay Chat',
        'Topic :: Internet',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| briancline/ircu-python | setup.py | Python | mit | 1,699 | [
"Brian"
] | cd9e3ee56e22bbc3efd1c7d20ed5f92872ad71b3db48dde106e4a21d71a8a7aa |
#!/usr/bin/env python
import sys
import os
import re
import gzip
from optparse import OptionParser
# biopython
from Bio import PDB
# TODO: normalize atom names, ie.
# O1P => OP1, O2P => OP2, itd.
# O3* => O3', itd.
def parse_args():
    """Build the command-line option parser and parse sys.argv.

    Returns a ``(parser, options, args)`` tuple so callers can reuse the
    parser (e.g. for print_help) alongside the parsed values.
    """
    opt_parser = OptionParser(description="normalize PDB file (change residue numbers, chains, reverse)")
    opt_parser.add_option("-o", "--output", dest="output",
                          help="save output to FILE", metavar="FILE")
    opt_parser.add_option("-i", "--input", dest="input",
                          help="read input from FILE", metavar="FILE")
    opt_parser.add_option("--reverse", dest="reverse", action="store_true",
                          help="reverse the order of the residues",
                          default=False)
    opt_parser.add_option("--reverse-id", dest="reverse_id", action="store_true",
                          help="reverse the id of the residues",
                          default=False)
    opt_parser.add_option("--residues-limit", dest="residues_limit",
                          help="save only N residues", metavar="N")
    opts, extra_args = opt_parser.parse_args()
    return (opt_parser, opts, extra_args)
def reverse_structure(s):
    """Return a new Bio.PDB structure whose chains, and the residues within
    each chain, appear in reverse order; model order is preserved."""
    new_s = PDB.Structure.Structure("reversed")
    for model in s:
        new_model = PDB.Model.Model(model.id)
        new_chains = []
        for chain in model:
            new_chain = PDB.Chain.Chain(chain.id)
            # Copy the residues, then flip their order within the chain.
            residues = []
            for r in chain:
                residues.append(r)
            residues.reverse()
            for r in residues:
                new_chain.add(r)
            new_chains.append(new_chain)
        # Chains themselves are also emitted in reverse order.
        new_chains.reverse()
        for new_chain in new_chains:
            new_model.add(new_chain)
        new_s.add(new_model)
    return new_s
def normalize_pdb(inp_fn, out_fn, reverse=False, reverse_id=False, limit=None):
    """generate out_fn and returns array of tuples
    (old_chain,old_res_id), (new_chain, new_res_id)"""
    numbers = []
    parser = PDB.PDBParser()
    # Transparently handle gzip-compressed input.
    # NOTE(review): the dot in "^.*.gz" is unescaped, so e.g. "foogz" also
    # matches — probably intended r'\.gz$'; confirm.
    if re.match("^.*.gz",inp_fn):
        inp = gzip.open(inp_fn)
    else:
        inp = open(inp_fn)
    s = parser.get_structure("c",inp)
    inp.close()
    total = 0
    for model in list(s):
        # n counts residues across the whole model, while i below is a
        # per-chain index.
        # NOTE(review): with multiple chains, reverse_id numbering (n - i)
        # restarts i per chain but uses the model-wide n — confirm intended.
        n = len([r for r in model.get_residues()])
        for chain in list(model):
            for i, r in enumerate(list(chain)):
                total += 1
                # Past the residue limit: drop the residue instead of renumbering.
                if limit is not None and total>limit:
                    chain.detach_child(r.get_id())
                    continue
                old_id = r.get_id()
                # Old residue number including any insertion code.
                old_num = (str(old_id[1])+str(old_id[2])).strip()
                old_chain = r.get_parent().get_id()
                if reverse_id:
                    new_num = n-i
                else:
                    new_num = i
                new_id = (old_id[0], new_num, ' ') # we ignore old_id[2]
                numbers.append(((old_chain, old_num), (old_chain, str(new_num))))
                # print old_id, new_id
                r.id = new_id
            # Remove chains/models emptied by the limit above.
            if len(chain)==0:
                model.detach_child(chain.id)
        if len(model)==0:
            s.detach_child(model.id)
    if reverse:
        s = reverse_structure(s)
    if limit is not None:
        c = len(list(s.get_residues()))
        print("number of residues=%s" % c)
        assert c<=limit
    # from http://biopython.org/wiki/Remove_PDB_disordered_atoms
    class NotDisordered(PDB.Select):
        def accept_atom(self, atom):
            # Keep ordered atoms, and altloc 'A' for disordered ones.
            return not atom.is_disordered() or atom.get_altloc()=='A'
    io = PDB.PDBIO()
    io.set_structure(s)
    io.save(out_fn, select=NotDisordered())
    return numbers
def main():
    """Command line entry point: validate options, then run the normalization."""
    opt_parser, options, _args = parse_args()
    if not (options.input and options.output):
        # Both file names are mandatory; bail out with usage information.
        print("specify input and output")
        opt_parser.print_help()
        exit(1)
    limit = int(options.residues_limit) if options.residues_limit is not None else None
    normalize_pdb(options.input, options.output, options.reverse, options.reverse_id, limit=limit)
if __name__ == '__main__':
    main()
| mmagnus/rna-pdb-tools | rna_tools/tools/clarna_play/ClaRNAlib/normalize_pdb.py | Python | gpl-3.0 | 4,083 | [
"Biopython"
] | 6ef1e07b04b06beba320864a8341a8401b6dc4e959086e82af15d3cdf84319d0 |
import numpy as np
try:
# use scipy if available: it's faster
from scipy.fftpack import fft, ifft, fftshift, ifftshift
except:
from numpy.fft import fft, ifft, fftshift, ifftshift
def FT_continuous(t, h, axis=-1, method=1):
    """Approximate a continuous 1D Fourier Transform with sampled data.

    This function uses the Fast Fourier Transform to approximate
    the continuous fourier transform of a sampled function, using
    the convention

    .. math::

       H(f) = \int h(t) exp(-2 \pi i f t) dt

    It returns f and H, which approximate H(f).

    Parameters
    ----------
    t : array_like
        regularly sampled array of times
        t is assumed to be regularly spaced, i.e.
        t = t0 + Dt * np.arange(N)
    h : array_like
        real or complex signal at each time
    axis : int
        axis along which to perform fourier transform.
        This axis must be the same length as t.
    method : int
        if 1 (default), shift the spectrum by pre-multiplying the samples
        with an alternating sign; otherwise use fftshift on the raw FFT.
        Both give the same result up to rounding.

    Returns
    -------
    f : ndarray
        frequencies of result. Units are the same as 1/t
    H : ndarray
        Fourier coefficients at each frequency.

    Raises
    ------
    ValueError
        if the number of samples is odd.
    """
    assert t.ndim == 1
    assert h.shape[axis] == t.shape[0]
    N = len(t)
    if N % 2 != 0:
        raise ValueError("number of samples must be even")

    Dt = t[1] - t[0]
    Df = 1. / (N * Dt)
    # N // 2 keeps the index an int on Python 3 (t[N / 2] raises TypeError).
    t0 = t[N // 2]

    f = Df * (np.arange(N) - N // 2)

    shape = np.ones(h.ndim, dtype=int)
    shape[axis] = N

    # Alternating sign implements a frequency shift of the FFT output.
    phase = np.ones(N)
    phase[1::2] = -1
    phase = phase.reshape(shape)

    if method == 1:
        H = Dt * fft(h * phase, axis=axis)
    else:
        H = Dt * fftshift(fft(h, axis=axis), axes=axis)

    # Phase corrections to account for the time origin t0.
    H *= phase
    H *= np.exp(-2j * np.pi * t0 * f.reshape(shape))
    H *= np.exp(-1j * np.pi * N / 2)

    return f, H
def IFT_continuous(f, H, axis=-1, method=1):
    """Approximate a continuous 1D Inverse Fourier Transform with sampled data.

    This function uses the Fast Fourier Transform to approximate
    the continuous inverse fourier transform of a sampled function, using
    the convention

    .. math::

       H(f) = integral[ h(t) exp(-2 pi i f t) dt]

       h(t) = integral[ H(f) exp(2 pi i f t) dt]

    It returns t and h, which approximate h(t).

    Parameters
    ----------
    f : array_like
        regularly sampled array of frequencies, i.e.
        f = f0 + Df * np.arange(N)
    H : array_like
        real or complex Fourier coefficients at each frequency
    axis : int
        axis along which to perform the inverse fourier transform.
        This axis must be the same length as f.
    method : int
        accepted for API symmetry with FT_continuous but not used here.

    Returns
    -------
    t : ndarray
        times of result. Units are the same as 1/f
    h : ndarray
        signal values at each time.

    Raises
    ------
    ValueError
        if the number of samples is odd.
    """
    assert f.ndim == 1
    assert H.shape[axis] == f.shape[0]

    n_samples = len(f)
    if n_samples % 2 != 0:
        raise ValueError("number of samples must be even")

    f0 = f[0]
    Df = f[1] - f[0]

    # Time grid implied by the frequency sampling: the window is centred on
    # zero, so the first time sample sits at -0.5 / Df.
    t0 = -0.5 / Df
    Dt = 1. / (n_samples * Df)
    t = t0 + Dt * np.arange(n_samples)

    # Broadcasting shape aligning the 1-d grids with the requested axis.
    shape = np.ones(H.ndim, dtype=int)
    shape[axis] = n_samples
    t_grid = t.reshape(shape)
    f_grid = f.reshape(shape)

    # Undo the shift to t0, inverse-transform, then restore amplitude/phase.
    shifted = ifft(H * np.exp(2j * np.pi * t0 * f_grid), axis=axis)
    h = n_samples * Df * np.exp(2j * np.pi * f0 * (t_grid - t0)) * shifted

    return t, h
def PSD_continuous(t, h, axis=-1, method=1):
    """Approximate a continuous 1D Power Spectral Density of sampled data.

    This function uses the Fast Fourier Transform to approximate
    the continuous fourier transform of a sampled function, using
    the convention

    .. math::

       H(f) = \int h(t) \exp(-2 \pi i f t) dt

    It returns f and PSD, which approximate PSD(f) where

    .. math::

       PSD(f) = |H(f)|^2 + |H(-f)|^2

    Parameters
    ----------
    t : array_like
        regularly sampled array of times
        t is assumed to be regularly spaced, i.e.
        t = t0 + Dt * np.arange(N)
    h : array_like
        real or complex signal at each time
    axis : int
        axis along which to perform fourier transform.
        This axis must be the same length as t.
    method : int
        if 1 (default), build the PSD from FT_continuous; otherwise use a
        direct fftshift-free computation (faster, phases cancel in |.|^2).

    Returns
    -------
    f : ndarray
        frequencies of result (non-negative). Units are the same as 1/t
    PSD : ndarray
        power spectral density at each frequency.

    Raises
    ------
    ValueError
        if the number of samples is odd.
    """
    assert t.ndim == 1
    assert h.shape[axis] == t.shape[0]
    N = len(t)
    if N % 2 != 0:
        raise ValueError("number of samples must be even")

    ax = axis % h.ndim

    # N // 2 keeps indices/slices ints on Python 3 (N / 2 raises TypeError).
    if method == 1:
        # use FT_continuous
        f, Hf = FT_continuous(t, h, axis)
        Hf = np.rollaxis(Hf, ax)
        f = -f[N // 2::-1]
        # Fold negative frequencies onto the positive half.
        PSD = abs(Hf[N // 2::-1]) ** 2
        PSD[:-1] += abs(Hf[N // 2:]) ** 2
        PSD = np.rollaxis(PSD, 0, ax + 1)
    else:
        # A faster way to do it is with fftshift
        # take advantage of the fact that phases go away
        Dt = t[1] - t[0]
        Df = 1. / (N * Dt)
        f = Df * np.arange(N // 2 + 1)

        Hf = fft(h, axis=axis)
        Hf = np.rollaxis(Hf, ax)
        PSD = abs(Hf[:N // 2 + 1]) ** 2
        PSD[-1] = 0
        PSD[1:] += abs(Hf[N // 2:][::-1]) ** 2
        PSD[0] *= 2
        PSD = Dt ** 2 * np.rollaxis(PSD, 0, ax + 1)

    return f, PSD
def sinegauss(t, t0, f0, Q):
    """Sine-Gaussian wavelet: a complex carrier at f0 under a Gaussian
    envelope of width set by the quality factor Q, centred at t0."""
    dt = t - t0
    damping = (f0 * 1. / Q) ** 2
    envelope = np.exp(-damping * dt ** 2)
    carrier = np.exp(2j * np.pi * f0 * dt)
    return envelope * carrier
def sinegauss_FT(f, t0, f0, Q):
    """Fourier transform of the sine-gaussian wavelet.

    This uses the convention

    .. math::

       H(f) = integral[ h(t) exp(-2pi i f t) dt]
    """
    a = (f0 * 1. / Q) ** 2
    amplitude = np.sqrt(np.pi / a)
    # Time shift t0 appears as a linear phase in frequency.
    shift = np.exp(-2j * np.pi * f * t0)
    gaussian = np.exp(-np.pi ** 2 * (f - f0) ** 2 / a)
    return amplitude * shift * gaussian
def sinegauss_PSD(f, t0, f0, Q):
    """Compute the PSD of the sine-gaussian function at frequency f

    .. math::

       PSD(f) = |H(f)|^2 + |H(-f)|^2

    Note: the PSD does not depend on the time offset t0 (phases cancel
    in the squared magnitude); the parameter is kept for API symmetry.
    """
    a = (f0 * 1. / Q) ** 2

    def _half(freq):
        # |H|^2 contribution of a single Gaussian lobe centred at f0.
        return np.pi / a * np.exp(-2 * np.pi ** 2 * (freq - f0) ** 2 / a)

    return _half(f) + _half(-f)
def wavelet_PSD(t, h, f0, Q=1.0):
    """Compute the wavelet PSD as a function of f0 and t

    Parameters
    ----------
    t : array_like
        array of times, length N
    h : array_like
        array of observed values, length N
    f0 : array_like
        array of candidate frequencies, length Nf
    Q : float
        Q-parameter for wavelet

    Returns
    -------
    PSD : ndarray
        The 2-dimensional PSD, of shape (Nf, N), corresponding with
        frequencies f0 and times t.
    """
    t, h, f0 = (np.asarray(arr) for arr in (t, h, f0))
    if (t.ndim != 1) or (t.shape != h.shape):
        raise ValueError('t and h must be one dimensional and the same shape')
    if f0.ndim != 1:
        raise ValueError('f0 must be one dimensional')

    # Broadcast Q to the shape of f0 so each frequency has its own Q value.
    Q = Q + np.zeros_like(f0)

    # Convolution theorem: transform, multiply by the conjugate wavelet
    # response at every candidate frequency, and transform back.
    freqs, H = FT_continuous(t, h)
    W = np.conj(sinegauss_FT(freqs, 0, f0[:, None], Q[:, None]))
    _, HW = IFT_continuous(freqs, H * W)

    return abs(HW) ** 2
| nhuntwalker/astroML | astroML/fourier.py | Python | bsd-2-clause | 6,982 | [
"Gaussian"
] | a6d50e5a942920098d298a8ccb6ea7c0038b3c6ad5aac6dbb74c3595cfcf28a6 |
intro = """
<br>
You have subscribed to receive alerts from <a href='http://www.globalforestwatch.org/'>Global Forest Watch</a>. This message reports new forest change alerts and user stories for the area of interest you selected. You will receive a separate email for each distinct area of interest you subscribe to.
<br><br>
"""
header="""
<b><u>Selected area</u>: {selected_area_name}</b>
<br><br>
"""
table_header = """
<table style="border-collapse: collapse;">
<tr>
<th style="border: solid 1px #bbbbbb;padding:2px;font-size:82%;" ><b>New Alerts</b></th>
<th style="border: solid 1px #bbbbbb;padding:2px;font-size:82%;" ><b>Type of Alert</b></th>
<th style="border: solid 1px #bbbbbb;padding:2px;font-size:82%;" ><b>Date of Alerts*</b></th>
<th style="border: solid 1px #bbbbbb;padding:2px;font-size:82%;" ><b>Summary</b></th>
<th style="border: solid 1px #bbbbbb;padding:2px;font-size:82%;" ><b>Specs</b></th>
<th style="border: solid 1px #bbbbbb;padding:2px;font-size:82%;" ><b>View & Download</b></th>
</tr>
"""
table_row = """
<tr>
<td style="border: solid 1px #bbbbbb;padding:2px;font-size:82%;text-align:center;" >{alerts}</td>
<td style="border: solid 1px #bbbbbb;padding:2px;font-size:82%;text-align:center;" >{email_name}</td>
<td style="border: solid 1px #bbbbbb;padding:2px;font-size:82%;text-align:center;" >{date_range}</td>
<td style="border: solid 1px #bbbbbb;padding:2px;font-size:82%;" >{summary}</td>
<td style="border: solid 1px #bbbbbb;padding:2px;font-size:82%;" >{alert_types}{specs}</td>
<td style="border: solid 1px #bbbbbb;padding:2px;font-size:82%;text-align:center;" ><a href='{url}'>Link</a></td>
</tr>
"""
table_footer = """
</table>
"""
outro = """
<br>
<em style="font-size:90%;margin-top:5px;">*"Date of Alerts" refers to the date range within which change was actually detected. There may be lag time between detection and when you receive this email.</em>
<br>
<br>
Please note that this information is subject to the Global Forest Watch <a href='http://globalforestwatch.com/terms'>Terms of Service</a>. You can unsubscribe or manage your subscriptions by emailing gfw@wri.org. Please visit <a href='http://fires.globalforestwatch.org/#v=home&x=115&y=0&l=5&lyrs=Active_Fires'>GFW Fires</a> to subscribe to receive fire alerts.
<br>
<br>
<br>
"""
link_country_iso = """http://www.globalforestwatch.org/country/{iso}"""
link_country_id1 = """http://www.globalforestwatch.org/country/{iso}/{id1}"""
link_geom = """http://www.globalforestwatch.org/map/3/{lat}/{lon}/ALL/grayscale/{url_id}?geojson={geom}&begin={min_date}&end={max_date}"""
link_iso = """http://www.globalforestwatch.org/map/4/0/0/{iso}/grayscale/{url_id}?begin={min_date}&end={max_date}"""
| aagm/gfw-api | gfw/mailers/digest_mailer.py | Python | gpl-2.0 | 2,740 | [
"VisIt"
] | 699973391ec0fa76d4fefd665d99a4af0d16b9f95a61ada38e15b9f48693e6f0 |
# $HeadURL$
"""
Set of utilities to retrieve Information from proxy
"""
__RCSID__ = "$Id$"
import base64
import types
import inspect
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Security.X509Chain import X509Chain, g_X509ChainType
from DIRAC.Core.Security.VOMS import VOMS
from DIRAC.Core.Security import Locations
# Emit, at import time, a deprecation message pointing the importer at
# DIRAC.Core.Security.ProxyInfo. The set records call sites already warned
# so repeated triggers (e.g. module reload) stay quiet.
__NOTIFIED_CALLERS = set()
stack = inspect.stack()
caller = ( stack[1][1], stack[1][2] )  # (filename, line number) of the importer
if caller not in __NOTIFIED_CALLERS:
  # Python 2 print statements — this module predates Python 3 support.
  print
  print 'From %s at line %s:' % caller
  print '[Deprecation warning] DIRAC.Core.Security.Misc will not be available in next release,'
  print ' use DIRAC.Core.Security.ProxyInfo instead.'
  print
  __NOTIFIED_CALLERS.add( caller )
def getProxyInfo( proxy = False, disableVOMS = False ):
  """
  Returns a dict with all the proxy info
  * values that will be there always
   'chain' : chain object containing the proxy
   'subject' : subject of the proxy
   'issuer' : issuer of the proxy
   'isProxy' : bool
   'isLimitedProxy' : bool
   'validDN' : Valid DN in DIRAC
   'validGroup' : Valid Group in DIRAC
   'secondsLeft' : Seconds left
  * values that can be there
   'path' : path to the file,
   'group' : DIRAC group
   'groupProperties' : Properties that apply to the DIRAC Group
   'username' : DIRAC username
   'identity' : DN that generated the proxy
   'hostname' : DIRAC host nickname
   'VOMS'
  """
  #Discover proxy location
  # `proxy` may be a chain object, a path string, or False (use the default
  # location from Locations.getProxyLocation()).
  proxyLocation = False
  if type( proxy ) == g_X509ChainType:
    chain = proxy
  else:
    if not proxy:
      proxyLocation = Locations.getProxyLocation()
    elif type( proxy ) in ( types.StringType, types.UnicodeType ):
      proxyLocation = proxy
    if not proxyLocation:
      return S_ERROR( "Can't find a valid proxy" )
    chain = X509Chain()
    retVal = chain.loadProxyFromFile( proxyLocation )
    if not retVal[ 'OK' ]:
      return S_ERROR( "Can't load %s: %s " % ( proxyLocation, retVal[ 'Message' ] ) )
  retVal = chain.getCredentials()
  if not retVal[ 'OK' ]:
    return retVal
  infoDict = retVal[ 'Value' ]
  infoDict[ 'chain' ] = chain
  if proxyLocation:
    infoDict[ 'path' ] = proxyLocation
  # Optionally enrich with VOMS attributes; failures are reported in the
  # result dict rather than aborting.
  if not disableVOMS and chain.isVOMS()['Value']:
    infoDict[ 'hasVOMS' ] = True
    retVal = VOMS().getVOMSAttributes( chain )
    if retVal[ 'OK' ]:
      infoDict[ 'VOMS' ] = retVal[ 'Value' ]
    else:
      infoDict[ 'VOMSError' ] = retVal[ 'Message' ].strip()
  return S_OK( infoDict )
def getProxyInfoAsString( proxyLoc = False, disableVOMS = False ):
  """
  Return the proxy information formatted as a printable string.
  """
  result = getProxyInfo( proxyLoc, disableVOMS )
  if not result[ 'OK' ]:
    return result
  return S_OK( formatProxyInfoAsString( result[ 'Value' ] ) )
def formatProxyInfoAsString( infoDict ):
  """
  convert a proxy infoDict into a string

  Fields are printed in a fixed order; entries absent from infoDict are
  skipped. Tuple entries in the field list map (dict key, display label).
  """
  leftAlign = 13
  contentList = []
  for field in ( 'subject', 'issuer', 'identity', ( 'secondsLeft', 'timeleft' ),
                 ( 'group', 'DIRAC group' ), 'path', 'username',
                 ( 'hasVOMS', 'VOMS' ), ( 'VOMS', 'VOMS fqan' ), ( 'VOMSError', 'VOMS Error' ) ):
    # types.StringType exists only on Python 2 — this module is Python 2 code.
    if type( field ) == types.StringType:
      dispField = field
    else:
      dispField = field[1]
      field = field[0]
    if not field in infoDict:
      continue
    if field == 'secondsLeft':
      # Render remaining lifetime as HH:MM:SS.
      secs = infoDict[ field ]
      hours = int( secs / 3600 )
      secs -= hours * 3600
      mins = int( secs / 60 )
      secs -= mins * 60
      value = "%02d:%02d:%02d" % ( hours, mins, secs )
    else:
      value = infoDict[ field ]
    contentList.append( "%s: %s" % ( dispField.ljust( leftAlign ), value ) )
  return "\n".join( contentList )
def getProxyStepsInfo( chain ):
  """
  Extended information of all Steps in the ProxyChain
  Returns a list of dictionary with Info for each Step.
  """
  infoList = []
  nC = chain.getNumCertsInChain()['Value']
  for i in range( nC ):
    cert = chain.getCertInChain( i )['Value']
    # Collect per-certificate details for this step of the chain.
    stepInfo = {}
    stepInfo[ 'subject' ] = cert.getSubjectDN()['Value']
    stepInfo[ 'issuer' ] = cert.getIssuerDN()['Value']
    stepInfo[ 'serial' ] = cert.getSerialNumber()['Value']
    stepInfo[ 'not before' ] = cert.getNotBeforeDate()['Value']
    stepInfo[ 'not after' ] = cert.getNotAfterDate()['Value']
    stepInfo[ 'lifetime' ] = cert.getRemainingSecs()['Value']
    stepInfo[ 'extensions' ] = cert.getExtensions()[ 'Value' ]
    # Group and VOMS markers are only recorded when actually present.
    dG = cert.getDIRACGroup( ignoreDefault = True )['Value']
    if dG:
      stepInfo[ 'group' ] = dG
    if cert.hasVOMSExtensions()[ 'Value' ]:
      stepInfo[ 'VOMS ext' ] = True
    infoList.append( stepInfo )
  return S_OK( infoList )
def formatProxyStepsInfoAsString( infoList ):
  """
  Format the list of proxy step dictionaries (as returned by
  getProxyStepsInfo) into a printable multi-line string.
  """
  lines = []
  for stepNum, stepInfo in enumerate( infoList ):
    lines.append( " + Step %s" % stepNum )
    for key in ( 'subject', 'issuer', 'serial', 'not after', 'not before',
                 'group', 'VOMS ext', 'lifetime', 'extensions' ):
      if key not in stepInfo:
        continue
      value = stepInfo[ key ]
      if key == 'serial':
        # Serial numbers are raw bytes; display them as hex.
        value = base64.b16encode( value )
      if key == 'lifetime':
        # Render remaining lifetime as HH:MM:SS.
        remaining = value
        hours = int( remaining / 3600 )
        remaining -= hours * 3600
        mins = int( remaining / 60 )
        remaining -= mins * 60
        value = "%02d:%02d:%02d" % ( hours, mins, remaining )
      if key == "extensions":
        value = "\n %s" % "\n ".join( [ "%s = %s" % ( extName.strip().rjust( 20 ), extValue.strip() )
                                        for extName, extValue in value ] )
      lines.append( " %s : %s" % ( key.ljust( 10 ).capitalize(), value ) )
  return "\n".join( lines )
| avedaee/DIRAC | Core/Security/Misc.py | Python | gpl-3.0 | 5,743 | [
"DIRAC"
] | e2e67a057570f444a8cdb6c1a25d91b3573280602f5c73e8c26c35a4de3d3cc7 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from agui import Signal
from agui.widgets import Window
from agui.extras import Icon
from remindor import functions
from .remindor_common.ui_helpers import SimpleDialogInfo
class SimpleDialog(object):
    """Dialog for quickly adding a reminder from a single line of text.

    Wraps an ``agui`` Window loaded from the ``simple`` UI file and wires
    its widgets to the validation/creation logic in ``SimpleDialogInfo``.
    """
    def __init__(self, parent=None):
        # Backend helper that validates reminder text and creates reminders.
        self.info = SimpleDialogInfo()
        # Emitted with the new reminder id after a successful save.
        self.saved = Signal()
        self.dialog = Window('simple_dialog', functions.ui_file('simple'), parent) #TODO: translate window
        self.dialog.closed.connect(self.dialog.close)
        # The reminder entry supports a clear button and inline error display.
        self.dialog.widgets.reminder_edit.has_clear = True
        self.dialog.widgets.reminder_edit.has_error = True
        self.dialog.widgets.reminder_edit.hide_error()
        # Re-validate on every text change so the Add button tracks validity.
        self.dialog.widgets.reminder_edit.changed.connect(self.validate)
        self.dialog.widgets.help_button.activated.connect(self.help)
        self.dialog.widgets.cancel_button.activated.connect(self.dialog.close)
        self.dialog.widgets.add_button.activated.connect(self.save)
        self.dialog.resize(350, 50)
        self.dialog.show()
    def help(self):
        """Show help for the dialog (not implemented yet)."""
        pass #TODO
    def validate(self, text):
        """Enable/disable the Add button based on whether *text* parses."""
        if self.info.validate(text):
            self.dialog.widgets.reminder_edit.hide_error()
            self.dialog.widgets.add_button.enable()
        else:
            self.dialog.widgets.reminder_edit.show_error()
            self.dialog.widgets.add_button.disable()
    def save(self):
        """Create the reminder; emit ``saved`` with its id and close on success."""
        text = self.dialog.widgets.reminder_edit.text
        # NOTE(review): 'id' shadows the builtin; harmless in this local scope.
        id = self.info.reminder(text)
        if id is not None:
            self.saved.emit(id)
            self.dialog.close()
        else:
            # Creation failed: re-run validation so the error indicator shows.
            self.validate(self.dialog.widgets.reminder_edit.text)
| bhdouglass/remindor | remindor/simple_dialog.py | Python | gpl-3.0 | 2,392 | [
"Brian"
] | 45e069a1fc6669cb77d710b9564f3c9517832aca50e17180bf54f565b236ae55 |
#!/usr/bin/env python
"""
Wraps genetrack.scripts.peakpred so the tool can be executed from Galaxy.
usage: %prog input output level sigma mode exclusion strand
"""
import sys
from galaxy import eggs
import pkg_resources
pkg_resources.require( "GeneTrack" )
from genetrack.scripts import peakpred
from genetrack import logger
if __name__ == "__main__":
    # Reuse the option parser shared with the genetrack peakpred script.
    parser = peakpred.option_parser()
    options, args = parser.parse_args()
    # Adjust genetrack's logger verbosity from the command line.
    logger.disable(options.verbosity)
    from genetrack import conf
    # trigger test mode
    if options.test:
        options.inpname = conf.testdata('test-hdflib-input.gtrack')
        options.outname = conf.testdata('predictions.bed')
    # missing input file name
    # NOTE(review): this only shows help when BOTH names are missing; a
    # single missing name falls through to predict() — confirm intended.
    if not options.inpname and not options.outname:
        parser.print_help()
    else:
        # NOTE: Python 2 print statements — this script is py2-only.
        print 'Sigma = %s' % options.sigma
        print 'Minimum peak = %s' % options.level
        print 'Peak-to-peak = %s' % options.exclude
        peakpred.predict(options.inpname, options.outname, options)
| volpino/Yeps-EURAC | tools/visualization/genetrack_peak_prediction.py | Python | mit | 1,016 | [
"Galaxy"
] | 258df0d7e2e7f3d17783f1606798a4dc7bca88ae060aa8491ce450bbca05e4e4 |
"""
Module with RecXElectrode from Allen Brain Institute
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
# Allen Institute Software License - This software license is the 2-clause BSD license plus clause a third
# clause that prohibits redistribution for commercial purposes without further permission.
#
# Copyright 2017. Allen Institute. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Redistributions for commercial purposes are not permitted without the Allen Institute's written permission. For
# purposes of this license, commercial purposes is the incorporation of the Allen Institute's software into anything for
# which you will charge fees or other compensation. Contact terms@alleninstitute.org for commercial licensing
# opportunities.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Adapted to NetPyNE by salvadordura@gmail.com
#
from builtins import range
from future import standard_library
standard_library.install_aliases()
import numpy as np
import math
class RecXElectrode(object):
    """Extracellular recording electrode array.

    Holds electrode site positions and, per cell (gid), the transfer
    resistances mapping per-segment membrane currents to extracellular
    potentials at each site (V_e = transfer_resistance * Im).
    """

    def __init__(self, sim):
        """Build the electrode array from ``sim.cfg.recordLFP``.

        ``sim.cfg.recordLFP`` must be a list of [x, y, z] site locations.
        """
        self.cfg = sim.cfg
        try:
            # Convert to ndarray: first index is x/y/z, second the channel number.
            self.pos = np.array(sim.cfg.recordLFP).T
            assert len(self.pos.shape) == 2
            assert self.pos.shape[0] == 3
            # Invert y-axis since by convention it refers to depth (eg cortical depth).
            self.pos[1, :] *= -1
        except Exception:
            # Original code set nsites/transferResistances redundantly inside
            # the try (immediately overwritten below); that dead code is removed.
            print('Error creating extracellular electrode: sim.cfg.recordLFP should contain a list of x,y,z locations')
            return
        self.nsites = self.pos.shape[1]  # number of electrode sites (channels)
        self.transferResistances = {}  # gid -> (nsites x nseg) array; V_e = transfer_resistance*Im

    def getTransferResistance(self, gid):
        """Return the precomputed transfer-resistance matrix for cell *gid*."""
        return self.transferResistances[gid]

    def calcTransferResistance(self, gid, seg_coords):
        """Precompute mapping from segment to electrode locations.

        Line-source approximation per segment in a homogeneous medium of
        conductivity ``sigma``; stores the result under *gid*.
        """
        sigma = 0.3  # mS/mm, extracellular conductivity
        # Value used in NEURON extracellular recording example ("extracellular_stim_and_rec")
        # rho = 35.4 # ohm cm, squid axon cytoplasm = 2.8249e-2 S/cm = 0.028 S/cm = 0.0028 S/mm = 2.8 mS/mm
        # rho_um = 35.4 * 0.01 = 35.4 / 1e6 * 1e4 = 0.354 Mohm um ~= 3 uS / um = 3000 uS / mm = 3 mS /mm
        # equivalent sigma value (~3) is 10x larger than Allen (0.3)
        # if use same sigma value, results are consistent
        r05 = (seg_coords['p0'] + seg_coords['p1']) / 2  # segment midpoints
        dl = seg_coords['p1'] - seg_coords['p0']  # segment direction vectors

        nseg = r05.shape[1]
        tr = np.zeros((self.nsites, nseg))

        for j in range(self.nsites):  # calculate mapping for each site on the electrode
            rel = np.expand_dims(self.pos[:, j], axis=1)  # coordinates of the j-th site
            rel_05 = rel - r05  # distance between electrode and segment centers
            # dot products computed column-wise; one value per segment
            r2 = np.einsum('ij,ij->j', rel_05, rel_05)
            rlldl = np.einsum('ij,ij->j', rel_05, dl)
            dlmag = np.linalg.norm(dl, axis=0)  # length of each segment
            rll = abs(rlldl / dlmag)  # component of r parallel to the segment axis; always positive
            rT2 = r2 - rll ** 2  # square of perpendicular component
            up = rll + dlmag / 2
            low = rll - dlmag / 2
            num = up + np.sqrt(up ** 2 + rT2)
            den = low + np.sqrt(low ** 2 + rT2)
            tr[j, :] = np.log(num / den) / dlmag  # units of (1/um); use with imemb_ (total seg current)

        tr *= 1 / (4 * math.pi * sigma)  # units: 1/um / (mS/mm) = mm/um / mS = 1e3 * kOhm = MOhm
        self.transferResistances[gid] = tr
| Neurosim-lab/netpyne | netpyne/support/recxelectrode.py | Python | mit | 5,581 | [
"NEURON"
] | e25a8e1b0990d0e32c334cecb2b0db7317cf8cbc14df3f59e1f46fcdbe5956e5 |
import numpy as np
import pytest
from pysisyphus.calculators.AFIR import AFIR
from pysisyphus.calculators.PySCF import PySCF
from pysisyphus.calculators import XTB
from pysisyphus.constants import AU2KJPERMOL
from pysisyphus.helpers import geom_loader
from pysisyphus.init_logging import init_logging
from pysisyphus.optimizers.RFOptimizer import RFOptimizer
from pysisyphus.testing import using
init_logging()
@pytest.mark.parametrize(
    "calc_cls, calc_kwargs, ref_cycle, ccl_dist, oc_dist",
    [
        pytest.param(
            PySCF,
            {"basis": "6-31g*", "xc": "b3lyp", "pal": 2},
            28,
            4.794052,
            2.677647,
            marks=using("pyscf"),
        ),
        pytest.param(XTB, {}, 27, 5.244300, 2.6294451, marks=using("xtb")),
    ],
)
def test_ohch3f_anion(calc_cls, calc_kwargs, ref_cycle, ccl_dist, oc_dist):
    """Example (R1) from
    https://aip.scitation.org/doi/pdf/10.1063/1.3457903?class=pdf
    See Fig. 3 and Fig. 4
    """
    geom = geom_loader("lib:ohch3f_anion_cs.xyz")
    # Treat the hydroxide (atoms 5 and 6) as its own AFIR fragment.
    oh_fragment = [
        (5, 6),
    ]
    afir_calc = AFIR(
        calc_cls(charge=-1, **calc_kwargs),
        oh_fragment,
        100 / AU2KJPERMOL,
        ignore_hydrogen=True,
    )
    geom.set_calculator(afir_calc)
    opt = RFOptimizer(geom, dump=True, trust_max=0.3)
    opt.run()
    assert opt.is_converged
    assert opt.cur_cycle == ref_cycle
    coords = geom.coords3d
    # C-Cl bond must have broken ...
    assert np.linalg.norm(coords[0] - coords[4]) == pytest.approx(ccl_dist, abs=1e-4)
    # ... and the O-C bond must have formed.
    assert np.linalg.norm(coords[0] - coords[5]) == pytest.approx(oc_dist, abs=1e-4)
@using("xtb")
def test_three_frag_afir():
    """AFIR optimization with two explicit fragments at gamma = 150 kJ/mol."""
    geom = geom_loader("lib:afir3test.xyz", coord_type="redund")
    frags = [
        (0, 1, 2),
        (3, 4, 5, 6),
    ]
    afir = AFIR(XTB(), frags, 150 / AU2KJPERMOL, ignore_hydrogen=False)
    geom.set_calculator(afir)
    opt = RFOptimizer(geom, dump=True, overachieve_factor=2)
    opt.run()
    assert opt.is_converged
    assert opt.cur_cycle == 34
    assert geom.energy == pytest.approx(-22.575014)
    coords = geom.coords3d
    # Check the key interfragment distances of the optimized structure.
    assert np.linalg.norm(coords[3] - coords[9]) == pytest.approx(2.6235122)
    assert np.linalg.norm(coords[2] - coords[0]) == pytest.approx(3.8615080)
| eljost/pysisyphus | tests/test_afir/test_afir.py | Python | gpl-3.0 | 2,352 | [
"PySCF",
"xTB"
] | ab58b423e7bc135bce0241c75c4914ca992f1878921123828cb067ea652714ed |
# -*- coding: utf-8 -*-
#
# Copyright 2008 - 2013 Brian R. D'Urso
#
# This file is part of Python Instrument Control System, also known as Pythics.
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
#
# load libraries
#
import visa
class GPIBInstrument(visa.GpibInstrument):
    """GPIB instrument with IEEE-488.2 common-command helpers."""

    def clear_status(self):
        """Clear the instrument's status registers (*CLS)."""
        self.write('*CLS')

    def reset(self):
        """Restore the instrument to its power-on defaults (*RST)."""
        self.write('*RST')

    @property
    def identity(self):
        """Identification string reported by the instrument (*IDN?)."""
        return self.ask('*IDN?')
## class VISAInstrument(wx.Panel, pythics.libcontrols.Control):
## @pythics.libcontrols.catch_exception
## def __init__(self, parent, key='', global_access=False,
## label='',
## *args, **kwargs):
## ## wx.StaticBox.__init__(self, parent, wx.ID_ANY, label=label,
## ## **kwargs)
## ## self.box_sizer = wx.StaticBoxSizer(self, wx.VERTICAL)
## ## t = wx.StaticText(parent, -1, "Controls placed \"inside\" the box are really its siblings")
## ## self.box_sizer.Add(t, 0, wx.TOP|wx.LEFT, 10)
## wx.Panel.__init__(self, parent, wx.ID_ANY, *args, **kwargs)
## self.panel_sizer = wx.BoxSizer(wx.VERTICAL)
## self.box = wx.StaticBox(self, wx.ID_ANY, label=label)
## self.panel_sizer.Add(self.box, proportion=0, flag=wx.EXPAND, border=0)
## self.SetSizer(self.panel_sizer)
## self.box_sizer = wx.StaticBoxSizer(self.box, wx.VERTICAL)
## t = wx.StaticText(self, -1, "Controls placed \"inside\" the box are really its siblings")
## t2 = wx.StaticText(self, -1, "Another line of text")
## t3 = wx.StaticText(self, -1, "Another line of text")
## t4 = wx.StaticText(self, -1, "Another line of text")
## self.box_sizer.Add(t, proportion=0, flag=wx.TOP|wx.LEFT, border=5)
## self.box_sizer.Add(t2, proportion=0, flag=wx.TOP|wx.LEFT, border=5)
## self.box_sizer.Add(t3, proportion=0, flag=wx.TOP|wx.LEFT, border=5)
## self.box_sizer.Add(t4, proportion=0, flag=wx.TOP|wx.LEFT, border=5)
## self.box_sizer.Layout()
## self.box_sizer.SetSizeHints(self.box)
## self.panel_sizer.Layout()
## self.panel_sizer.SetSizeHints(self)
## pythics.libcontrols.Control.__init__(self, parent,
## action=None,
## key=key,
## save=False,
## global_access=global_access)
## def generate_proxy(self, return_queue):
## return VISAInstrumentProxy(self, return_queue)
## #
## # Proxy
## #
## class VISAInstrumentProxy(pythics.libcontrols.ControlProxy):
## @pythics.libcontrols.gui_call
## def trigger_action(self):
## self._control.run_action_no_event()
## # action property
## @pythics.libcontrols.gui_call
## def __get_action(self):
## return self._control.action
## @pythics.libcontrols.gui_call
## def __set_action(self, value):
## self._control.action = value
## action = property(__get_action, __set_action)
## # label property
## @pythics.libcontrols.gui_call
## def __get_label(self):
## return self._control.GetLabel()
## @pythics.libcontrols.gui_call
## def __set_label(self, value):
## self._control.SetLabel(value)
## label = property(__get_label, __set_label)
| LunarLanding/Pythics | pythics/libinstrument.py | Python | gpl-3.0 | 4,074 | [
"Brian"
] | 215c1c23e81740f38fb60addb7b8e3c554811052ab14871abb54531ca1e3cd26 |
#!/usr/bin/env python
"""
Script to build a "binary wheel" for the 'pip' Python package manager for
the LAMMPS python module which includes the shared library file. After a
successful build the script attempts to install the wheel into a system
specific site-packages folder or - failing that - into the corresponding
user site-packages folder. Called from the 'install-python' build target
in the GNU make and CMake based build systems. Can also be called
independently and used to build the wheel without installing it.
"""
from __future__ import print_function
import sys,os,shutil,time,glob,subprocess
from argparse import ArgumentParser
parser = ArgumentParser(prog='install.py',
                        description='LAMMPS python package installer script')

parser.add_argument("-p", "--package", required=True,
                    help="path to the LAMMPS Python package")
parser.add_argument("-l", "--lib", required=True,
                    help="path to the compiled LAMMPS shared library")
parser.add_argument("-n", "--noinstall", action="store_true", default=False,
                    help="only build a binary wheel. Don't attempt to install it")

args = parser.parse_args()

# validate arguments and make paths absolute
if args.package:
    if not os.path.exists(args.package):
        print( "ERROR: LAMMPS package %s does not exist" % args.package)
        parser.print_help()
        sys.exit(1)
    else:
        args.package = os.path.abspath(args.package)

if args.lib:
    if not os.path.exists(args.lib):
        print( "ERROR: LAMMPS shared library %s does not exist" % args.lib)
        parser.print_help()
        sys.exit(1)
    else:
        args.lib = os.path.abspath(args.lib)

# we need to switch to the folder of the python package
olddir = os.path.abspath('.')
os.chdir(os.path.dirname(args.package))

# remove any wheel files left over from previous calls
print("Purging existing wheels...")
for wheel in glob.glob('lammps-*.whl'):
    print("deleting " + wheel)
    os.remove(wheel)

# copy shared object to the current folder so that
# it will show up in the installation at the expected location
# NOTE(review): os.putenv changes the process environment without updating
# os.environ; makewheel.py (run via os.system below) inherits it.
os.putenv('LAMMPS_SHARED_LIB',os.path.basename(args.lib))
shutil.copy(args.lib,'lammps')

# create a virtual environment for building the wheel
shutil.rmtree('buildwheel',True)
try:
    txt = subprocess.check_output([sys.executable, '-m', 'virtualenv', 'buildwheel', '-p', sys.executable], stderr=subprocess.STDOUT, shell=False)
    print(txt.decode('UTF-8'))
except subprocess.CalledProcessError as err:
    sys.exit("Failed to create a virtualenv: {0}".format(err.output.decode('UTF-8')))

# now run the commands to build the wheel. those must be in a separate script
# and run in subprocess, since this will use the virtual environment and
# there is no simple way to return from that in python.
os.system(sys.executable + ' makewheel.py')

# remove temporary folders and files
shutil.rmtree('buildwheel',True)
shutil.rmtree('build',True)
shutil.rmtree('lammps.egg-info',True)
os.remove(os.path.join('lammps',os.path.basename(args.lib)))

# stop here if we were asked not to install the wheel we created
if args.noinstall:
    # NOTE(review): exit() is the site-module REPL helper; sys.exit(0) would
    # be the conventional choice in a script.
    exit(0)
# install the wheel with pip. first try to install in the default environment.
# that will be a virtual environment, if active, or the system folder.
# recent versions of pip will automatically drop to use the user folder
# in case the system folder is not writable.
# we use a subprocess so we can catch an exception on failure.
# we need to check whether pip refused to install because of a
# version of the module previously installed with distutils. those
# must be uninstalled manually. We must not ignore this and drop
# back to install into a (forced) user folder.
print("Installing wheel")
for wheel in glob.glob('lammps-*.whl'):
    try:
        txt = subprocess.check_output([sys.executable, '-m', 'pip', 'install', '--force-reinstall', wheel], stderr=subprocess.STDOUT, shell=False)
        print(txt.decode('UTF-8'))
        # NOTE(review): on success the wheel is neither copied back to olddir
        # nor removed (the copy/remove below only runs after a user-folder
        # install) — confirm this asymmetry is intended.
        continue
    except subprocess.CalledProcessError as err:
        errmsg = err.output.decode('UTF-8')
        # BUGFIX: str.find() returns -1 (truthy!) when the text is absent, so
        # the original "if errmsg.find(...)" aborted on *any* pip failure.
        # Use a substring test so we only abort on the distutils conflict.
        if "distutils installed" in errmsg:
            sys.exit(errmsg + "You need to uninstall the LAMMPS python module manually first.\n")
    try:
        print('Installing wheel into standard site-packages folder failed. Trying user folder now')
        txt = subprocess.check_output([sys.executable, '-m', 'pip', 'install', '--user', '--force-reinstall', wheel], stderr=subprocess.STDOUT, shell=False)
        print(txt.decode('UTF-8'))
    except Exception:
        sys.exit('Failed to install wheel ' + wheel)
    shutil.copy(wheel, olddir)
    os.remove(wheel)
| akohlmey/lammps | python/install.py | Python | gpl-2.0 | 4,578 | [
"LAMMPS"
] | 2f11ede4f6e062b89cc65bfb72844b79dcc3d3105041ad297505f82bf4bbefd9 |
# commands.py - command processing for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, nullrev, short
from lock import release
from i18n import _, gettext
import os, re, sys, difflib, time, tempfile
import hg, util, revlog, bundlerepo, extensions, copies, error
import patch, help, mdiff, url, encoding, templatekw
import archival, changegroup, cmdutil, sshserver, hbisect
from hgweb import server
import merge as merge_
import minirst
# Commands start here, listed alphabetically
def add(ui, repo, *pats, **opts):
    """add the specified files on the next commit

    Schedule files to be version controlled and added to the
    repository.

    The files will be added to the repository at the next commit. To
    undo an add before that, see hg forget.

    If no names are given, add all files to the repository.

    .. container:: verbose

       An example showing how new (unknown) files are added
       automatically by ``hg add``::

         $ ls
         foo.c
         $ hg status
         ? foo.c
         $ hg add
         adding foo.c
         $ hg status
         A foo.c
    """

    bad = []
    names = []
    m = cmdutil.match(repo, pats, opts)
    oldbad = m.bad
    # wrap the matcher's bad-file callback: collect the bad names here
    # while still delegating to the original handler
    m.bad = lambda x, y: bad.append(x) or oldbad(x, y)

    for f in repo.walk(m):
        exact = m.exact(f)
        # only add files named explicitly or not yet tracked in dirstate
        if exact or f not in repo.dirstate:
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % m.rel(f))
    if not opts.get('dry_run'):
        # repo.add() returns files it could not add; only explicitly
        # requested failures count towards the exit status
        bad += [f for f in repo.add(names) if f in m.files()]
    # non-zero exit status if any explicitly named file failed
    return bad and 1 or 0
def addremove(ui, repo, *pats, **opts):
    """add all new files, delete all missing files

    Add all new files and remove all missing files from the
    repository.

    New files are ignored if they match any of the patterns in
    .hgignore. As with add, these changes take effect at the next
    commit.

    Use the -s/--similarity option to detect renamed files. With a
    parameter greater than 0, this compares every removed file with
    every added file and records those similar enough as renames. This
    option takes a percentage between 0 (disabled) and 100 (files must
    be identical) as its parameter. Detecting renamed files this way
    can be expensive.
    """
    # -s/--similarity arrives as a percentage string; cmdutil.addremove()
    # expects a 0.0-1.0 ratio.
    rawsim = opts.get('similarity') or 0
    try:
        sim = float(rawsim)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if sim < 0 or sim > 100:
        raise util.Abort(_('similarity must be between 0 and 100'))
    return cmdutil.addremove(repo, pats, opts, similarity=sim / 100.0)
def annotate(ui, repo, *pats, **opts):
    """show changeset information by line for each file

    List changes in files, showing the revision id responsible for
    each line

    This command is useful for discovering when a change was made and
    by whom.

    Without the -a/--text option, annotate will avoid processing files
    it detects as binary. With -a, annotate will annotate the file
    anyway, although the results will probably be neither useful
    nor desirable.
    """
    if opts.get('follow'):
        # --follow is deprecated and now just an alias for -f/--file
        # to mimic the behavior of Mercurial before version 1.5
        opts['file'] = 1

    datefunc = ui.quiet and util.shortdate or util.datestr
    getdate = util.cachefunc(lambda x: datefunc(x[0].date()))

    if not pats:
        raise util.Abort(_('at least one filename or pattern is required'))

    # map of option name -> formatter producing one output column
    opmap = [('user', lambda x: ui.shortuser(x[0].user())),
             ('number', lambda x: str(x[0].rev())),
             ('changeset', lambda x: short(x[0].node())),
             ('date', getdate),
             ('file', lambda x: x[0].path()),
            ]

    # default to revision numbers when no column was requested
    if (not opts.get('user') and not opts.get('changeset')
        and not opts.get('date') and not opts.get('file')):
        opts['number'] = 1

    linenumber = opts.get('line_number') is not None
    if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
        raise util.Abort(_('at least one of -n/-c is required for -l'))

    funcmap = [func for op, func in opmap if opts.get(op)]
    if linenumber:
        # append ":<lineno>" to the last requested column
        lastfunc = funcmap[-1]
        funcmap[-1] = lambda x: "%s:%s" % (lastfunc(x), x[1])

    ctx = repo[opts.get('rev')]
    m = cmdutil.match(repo, pats, opts)
    follow = not opts.get('no_follow')
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        if not opts.get('text') and util.binary(fctx.data()):
            ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
            continue

        lines = fctx.annotate(follow=follow, linenumber=linenumber)
        pieces = []

        for f in funcmap:
            l = [f(n) for n, dummy in lines]
            if l:
                # right-align each column to its widest entry
                ml = max(map(len, l))
                pieces.append(["%*s" % (ml, x) for x in l])

        if pieces:
            for p, l in zip(zip(*pieces), lines):
                ui.write("%s: %s" % (" ".join(p), l[1]))
def archive(ui, repo, dest, **opts):
    '''create an unversioned archive of a repository revision

    By default, the revision used is the parent of the working
    directory; use -r/--rev to specify a different revision.

    To specify the type of archive to create, use -t/--type. Valid
    types are:

    :``files``: a directory full of files (default)
    :``tar``:   tar archive, uncompressed
    :``tbz2``:  tar archive, compressed using bzip2
    :``tgz``:   tar archive, compressed using gzip
    :``uzip``:  zip archive, uncompressed
    :``zip``:   zip archive, compressed using deflate

    The exact name of the destination archive or directory is given
    using a format string; see 'hg help export' for details.

    Each member added to an archive file has a directory prefix
    prepended. Use -p/--prefix to specify a format string for the
    prefix. The default is the basename of the archive, with suffixes
    removed.
    '''

    ctx = repo[opts.get('rev')]
    if not ctx:
        raise util.Abort(_('no working directory: please specify a revision'))
    node = ctx.node()
    # expand format specifiers (%h, %R, ...) in the destination name
    dest = cmdutil.make_filename(repo, dest, node)
    if os.path.realpath(dest) == repo.root:
        raise util.Abort(_('repository root cannot be destination'))
    matchfn = cmdutil.match(repo, [], opts)
    kind = opts.get('type') or 'files'
    prefix = opts.get('prefix')

    if dest == '-':
        # '-' means write the archive to stdout; impossible for a directory
        if kind == 'files':
            raise util.Abort(_('cannot archive plain files to stdout'))
        dest = sys.stdout
        if not prefix:
            prefix = os.path.basename(repo.root) + '-%h'

    prefix = cmdutil.make_filename(repo, prefix, node)
    archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
                     matchfn, prefix)
def backout(ui, repo, node=None, rev=None, **opts):
    '''reverse effect of earlier changeset

    Commit the backed out changes as a new changeset. The new
    changeset is a child of the backed out changeset.

    If you backout a changeset other than the tip, a new head is
    created. This head will be the new tip and you should merge this
    backout changeset with another head.

    The --merge option remembers the parent of the working directory
    before starting the backout, then merges the new head with that
    changeset afterwards. This saves you from doing the merge by hand.
    The result of this merge is not committed, as with a normal merge.

    See 'hg help dates' for a list of formats valid for -d/--date.
    '''
    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    if not rev:
        rev = node

    if not rev:
        raise util.Abort(_("please specify a revision to backout"))

    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    cmdutil.bail_if_changed(repo)
    node = repo.lookup(rev)

    op1, op2 = repo.dirstate.parents()
    # the revision to back out must be an ancestor of the working parent
    a = repo.changelog.ancestor(op1, node)
    if a != node:
        raise util.Abort(_('cannot backout change on a different branch'))

    p1, p2 = repo.changelog.parents(node)
    if p1 == nullid:
        raise util.Abort(_('cannot backout a change with no parents'))
    if p2 != nullid:
        # merge changesets are ambiguous: the user must pick which parent
        # to revert towards via --parent
        if not opts.get('parent'):
            raise util.Abort(_('cannot backout a merge changeset without '
                               '--parent'))
        p = repo.lookup(opts['parent'])
        if p not in (p1, p2):
            raise util.Abort(_('%s is not a parent of %s') %
                             (short(p), short(node)))
        parent = p
    else:
        if opts.get('parent'):
            raise util.Abort(_('cannot use --parent on non-merge changeset'))
        parent = p1

    # the backout should appear on the same branch
    branch = repo.dirstate.branch()
    hg.clean(repo, node, show_stats=False)
    repo.dirstate.setbranch(branch)
    # revert the working directory to the chosen parent, then commit the
    # result as the backout changeset
    revert_opts = opts.copy()
    revert_opts['date'] = None
    revert_opts['all'] = True
    revert_opts['rev'] = hex(parent)
    revert_opts['no_backup'] = None
    revert(ui, repo, **revert_opts)
    commit_opts = opts.copy()
    commit_opts['addremove'] = False
    if not commit_opts['message'] and not commit_opts['logfile']:
        # we don't translate commit messages
        commit_opts['message'] = "Backed out changeset %s" % short(node)
        commit_opts['force_editor'] = True
    commit(ui, repo, **commit_opts)
    def nice(node):
        return '%d:%s' % (repo.changelog.rev(node), short(node))
    ui.status(_('changeset %s backs out changeset %s\n') %
              (nice(repo.changelog.tip()), nice(node)))
    if op1 != node:
        # restore the pre-backout working parent; optionally auto-merge
        hg.clean(repo, op1, show_stats=False)
        if opts.get('merge'):
            ui.status(_('merging with changeset %s\n')
                      % nice(repo.changelog.tip()))
            hg.merge(repo, hex(repo.changelog.tip()))
        else:
            ui.status(_('the backout changeset is a new head - '
                        'do not forget to merge\n'))
            ui.status(_('(use "backout --merge" '
                        'if you want to auto-merge)\n'))
def bisect(ui, repo, rev=None, extra=None, command=None,
           reset=None, good=None, bad=None, skip=None, noupdate=None):
    """subdivision search of changesets

    This command helps to find changesets which introduce problems. To
    use, mark the earliest changeset you know exhibits the problem as
    bad, then mark the latest changeset which is free from the problem
    as good. Bisect will update your working directory to a revision
    for testing (unless the -U/--noupdate option is specified). Once
    you have performed tests, mark the working directory as good or
    bad, and bisect will either update to another candidate changeset
    or announce that it has found the bad revision.

    As a shortcut, you can also use the revision argument to mark a
    revision as good or bad without checking it out first.

    If you supply a command, it will be used for automatic bisection.
    Its exit status will be used to mark revisions as good or bad:
    status 0 means good, 125 means to skip the revision, 127
    (command not found) will abort the bisection, and any other
    non-zero exit status means the revision is bad.
    """
    def print_result(nodes, good):
        # report either the single culprit or, when revisions were
        # skipped, the set of remaining candidates
        displayer = cmdutil.show_changeset(ui, repo, {})
        if len(nodes) == 1:
            # narrowed it down to a single revision
            if good:
                ui.write(_("The first good revision is:\n"))
            else:
                ui.write(_("The first bad revision is:\n"))
            displayer.show(repo[nodes[0]])
        else:
            # multiple possible revisions
            if good:
                ui.write(_("Due to skipped revisions, the first "
                        "good revision could be any of:\n"))
            else:
                ui.write(_("Due to skipped revisions, the first "
                        "bad revision could be any of:\n"))
            for n in nodes:
                displayer.show(repo[n])
        displayer.close()

    def check_state(state, interactive=True):
        # bisection needs at least one good and one bad revision on record
        if not state['good'] or not state['bad']:
            if (good or bad or skip or reset) and interactive:
                return
            if not state['good']:
                raise util.Abort(_('cannot bisect (no known good revisions)'))
            else:
                raise util.Abort(_('cannot bisect (no known bad revisions)'))
        return True

    # backward compatibility
    if rev in "good bad reset init".split():
        ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
        cmd, rev, extra = rev, extra, None
        if cmd == "good":
            good = True
        elif cmd == "bad":
            bad = True
        else:
            reset = True
    elif extra or good + bad + skip + reset + bool(command) > 1:
        raise util.Abort(_('incompatible arguments'))

    if reset:
        # discard all recorded bisection state
        p = repo.join("bisect.state")
        if os.path.exists(p):
            os.unlink(p)
        return

    state = hbisect.load_state(repo)

    if command:
        # automatic mode: run the command, classify by exit status, repeat
        changesets = 1
        try:
            while changesets:
                # update state
                status = util.system(command)
                if status == 125:
                    transition = "skip"
                elif status == 0:
                    transition = "good"
                # status < 0 means process was killed
                elif status == 127:
                    raise util.Abort(_("failed to execute %s") % command)
                elif status < 0:
                    raise util.Abort(_("%s killed") % command)
                else:
                    transition = "bad"
                ctx = repo[rev or '.']
                state[transition].append(ctx.node())
                ui.status(_('Changeset %d:%s: %s\n') % (ctx, ctx, transition))
                check_state(state, interactive=False)
                # bisect
                nodes, changesets, good = hbisect.bisect(repo.changelog, state)
                # update to next check
                cmdutil.bail_if_changed(repo)
                hg.clean(repo, nodes[0], show_stats=False)
        finally:
            # always persist progress, even if the command aborted mid-run
            hbisect.save_state(repo, state)
        return print_result(nodes, good)

    # update state
    node = repo.lookup(rev or '.')
    if good or bad or skip:
        if good:
            state['good'].append(node)
        elif bad:
            state['bad'].append(node)
        elif skip:
            state['skip'].append(node)
        hbisect.save_state(repo, state)

    if not check_state(state):
        return

    # actually bisect
    nodes, changesets, good = hbisect.bisect(repo.changelog, state)
    if changesets == 0:
        print_result(nodes, good)
    else:
        assert len(nodes) == 1 # only a single node can be tested next
        node = nodes[0]
        # compute the approximate number of remaining tests
        tests, size = 0, 2
        while size <= changesets:
            tests, size = tests + 1, size * 2
        rev = repo.changelog.rev(node)
        ui.write(_("Testing changeset %d:%s "
                   "(%d changesets remaining, ~%d tests)\n")
                 % (rev, short(node), changesets, tests))
        if not noupdate:
            cmdutil.bail_if_changed(repo)
            return hg.clean(repo, node)
def branch(ui, repo, label=None, **opts):
    """set or show the current branch name

    With no argument, show the current branch name. With one argument,
    set the working directory branch name (the branch will not exist
    in the repository until the next commit). Standard practice
    recommends that primary development take place on the 'default'
    branch.

    Unless -f/--force is specified, branch will not let you set a
    branch name that already exists, even if it's inactive.

    Use -C/--clean to reset the working directory branch to that of
    the parent of the working directory, negating a previous branch
    change.

    Use the command 'hg update' to switch to an existing branch. Use
    'hg commit --close-branch' to mark this branch as closed.
    """

    if opts.get('clean'):
        # reset to the branch of the first working-directory parent
        label = repo[None].parents()[0].branch()
        repo.dirstate.setbranch(label)
        ui.status(_('reset working directory to branch %s\n') % label)
    elif label:
        # branch names are stored UTF-8 encoded
        utflabel = encoding.fromlocal(label)
        if not opts.get('force') and utflabel in repo.branchtags():
            if label not in [p.branch() for p in repo.parents()]:
                raise util.Abort(_('a branch of the same name already exists'
                                   " (use 'hg update' to switch to it)"))
        repo.dirstate.setbranch(utflabel)
        ui.status(_('marked working directory as branch %s\n') % label)
    else:
        # no argument: just print the current branch name
        ui.write("%s\n" % encoding.tolocal(repo.dirstate.branch()))
def branches(ui, repo, active=False, closed=False):
    """list repository named branches
    List the repository's named branches, indicating which ones are
    inactive. If -c/--closed is specified, also list branches which have
    been marked closed (see hg commit --close-branch).
    If -a/--active is specified, only show active branches. A branch
    is considered active if it contains repository heads.
    Use the command 'hg update' to switch to an existing branch.
    """
    # full hashes with --debug, short ones otherwise
    hexfunc = ui.debugflag and hex or short
    activebranches = [repo[n].branch() for n in repo.heads()]
    def testactive(tag, node):
        # active = the branch owns a repository head AND this node is an
        # open (not closed) head of the branch
        realhead = tag in activebranches
        open = node in repo.branchheads(tag, closed=False)
        return realhead and open
    # sort so active branches come first, then by descending revision
    branches = sorted([(testactive(tag, node), repo.changelog.rev(node), tag)
                       for tag, node in repo.branchtags().items()],
                      reverse=True)
    for isactive, node, tag in branches:
        if (not active) or isactive:
            encodedtag = encoding.tolocal(tag)
            if ui.quiet:
                ui.write("%s\n" % encodedtag)
            else:
                hn = repo.lookup(node)
                if isactive:
                    notice = ''
                elif hn not in repo.branchheads(tag, closed=False):
                    # branch tip is not an open head -> branch was closed;
                    # only show it when --closed was given
                    if not closed:
                        continue
                    notice = _(' (closed)')
                else:
                    notice = _(' (inactive)')
                # pad the rev number so the hash column lines up regardless
                # of the display width of the branch name
                rev = str(node).rjust(31 - encoding.colwidth(encodedtag))
                data = encodedtag, rev, hexfunc(hn), notice
                ui.write("%s %s:%s%s\n" % data)
def bundle(ui, repo, fname, dest=None, **opts):
    """create a changegroup file
    Generate a compressed changegroup file collecting changesets not
    known to be in another repository.
    If you omit the destination repository, then hg assumes the
    destination will have all the nodes you specify with --base
    parameters. To create a bundle containing all changesets, use
    -a/--all (or --base null).
    You can change compression method with the -t/--type option.
    The available compression methods are: none, bzip2, and
    gzip (by default, bundles are compressed using bzip2).
    The bundle file can then be transferred using conventional means
    and applied to another repository with the unbundle or pull
    command. This is useful when direct push and pull are not
    available or when exporting an entire repository is undesirable.
    Applying bundles preserves all changeset contents including
    permissions, copy/rename information, and revision history.
    """
    revs = opts.get('rev') or None
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # --all is shorthand for --base null
    if opts.get('all'):
        base = ['null']
    else:
        base = opts.get('base')
    if base:
        if dest:
            raise util.Abort(_("--base is incompatible with specifying "
                               "a destination"))
        base = [repo.lookup(rev) for rev in base]
        # create the right base
        # XXX: nodesbetween / changegroup* should be "fixed" instead
        # walk backwards from the requested heads (or all heads), stopping
        # at anything reachable from a --base node; roots found on the way
        # become the outgoing set 'o'
        o = []
        has = set((nullid,))
        for n in base:
            has.update(repo.changelog.reachable(n))
        if revs:
            visit = list(revs)
            has.difference_update(revs)
        else:
            visit = repo.changelog.heads()
        seen = {}
        while visit:
            n = visit.pop(0)
            parents = [p for p in repo.changelog.parents(n) if p not in has]
            if len(parents) == 0:
                if n not in has:
                    o.append(n)
            else:
                for p in parents:
                    if p not in seen:
                        seen[p] = 1
                        visit.append(p)
    else:
        # no --base: compare against the destination repository
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        other = hg.repository(cmdutil.remoteui(repo, opts), dest)
        revs, checkout = hg.addbranchrevs(repo, other, branches, revs)
        o = repo.findoutgoing(other, force=opts.get('force'))
    if not o:
        ui.status(_("no changes found\n"))
        return
    if revs:
        cg = repo.changegroupsubset(o, revs, 'bundle')
    else:
        cg = repo.changegroup(o, 'bundle')
    # map the user-visible compression name onto the on-disk bundle header
    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
    bundletype = btypes.get(bundletype)
    if bundletype not in changegroup.bundletypes:
        raise util.Abort(_('unknown bundle type specified with --type'))
    changegroup.writebundle(cg, fname, bundletype)
def cat(ui, repo, file1, *pats, **opts):
    """output the current or given revision of files

    Print the specified files as they were at the given revision. If
    no revision is given, the parent of the working directory is used,
    or tip if no revision is checked out.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are the same as
    for the export command, with the following additions:
    :``%s``: basename of file being printed
    :``%d``: dirname of file being printed, or '.' if in repository root
    :``%p``: root-relative path name of file being printed
    """
    ctx = repo[opts.get('rev')]
    matcher = cmdutil.match(repo, (file1,) + pats, opts)
    # exit status 1 unless at least one file was printed
    exitcode = 1
    for path in ctx.walk(matcher):
        out = cmdutil.make_file(repo, opts.get('output'), ctx.node(),
                                pathname=path)
        contents = ctx[path].data()
        if opts.get('decode'):
            contents = repo.wwritedata(path, contents)
        out.write(contents)
        exitcode = 0
    return exitcode
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository

    Create a copy of an existing repository in a new directory. If no
    destination directory name is specified, it defaults to the
    basename of the source. The location of the source is added to the
    new repository's .hg/hgrc file, as the default to be used for
    future pulls.

    See 'hg help urls' for valid source format details. It is possible
    to specify an ``ssh://`` URL as the destination, but no .hg/hgrc
    and working directory will be created on the remote side.

    A set of changesets (tags, or branch names) to pull may be
    specified by listing each changeset (tag, or branch name) with
    -r/--rev. The clone will then contain only those changesets and
    all their ancestors; no subsequent changesets (including
    subsequent tags) will be present. Using -r/--rev (or
    'clone src#rev dest') implies --pull, even for local source
    repositories.

    For efficiency, hardlinks are used for cloning whenever the source
    and destination are on the same filesystem (note this applies only
    to the repository data, not to the working directory). Some
    filesystems, such as AFS, implement hardlinking incorrectly, but
    do not report errors; in these cases, use the --pull option to
    avoid hardlinking. You can sometimes clone repository and working
    directory using full hardlinks with ``cp -al REPO REPOCLONE``, but
    the operation is not atomic, requires an editor/tools that break
    hardlinks, and is incompatible with extensions (such as mq) that
    place metadata under .hg.

    Mercurial will update the working directory to the first
    applicable revision from this list:
    a) null if -U or the source repository has no changesets
    b) if -u . and the source repository is local, the first parent of
       the source repository's working directory
    c) the changeset specified with -u (if a branch name, this means
       the latest head of that branch)
    d) the changeset specified with -r
    e) the tipmost head specified with -b
    f) the tipmost head specified with the url#branch source syntax
    g) the tipmost head of the default branch
    h) tip
    """
    if opts.get('noupdate') and opts.get('updaterev'):
        raise util.Abort(_("cannot specify both --noupdate and --updaterev"))
    # an explicit -u revision wins; otherwise update unless -U was given
    updatearg = opts.get('updaterev') or not opts.get('noupdate')
    hg.clone(cmdutil.remoteui(ui, opts), source, dest,
             pull=opts.get('pull'),
             stream=opts.get('uncompressed'),
             rev=opts.get('rev'),
             update=updatearg,
             branch=opts.get('branch'))
def commit(ui, repo, *pats, **opts):
    """commit the specified files or all outstanding changes
    Commit changes to the given files into the repository. Unlike a
    centralized RCS, this operation is a local operation. See hg push
    for a way to actively distribute your changes.
    If a list of files is omitted, all changes reported by "hg status"
    will be committed.
    If you are committing the result of a merge, do not provide any
    filenames or -I/-X filters.
    If no commit message is specified, the configured editor is
    started to prompt you for a message.
    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    extra = {}
    if opts.get('close_branch'):
        extra['close'] = 1
    e = cmdutil.commiteditor
    if opts.get('force_editor'):
        e = cmdutil.commitforceeditor
    def commitfunc(ui, repo, message, match, opts):
        # the actual commit; editor and extra are closed over from above
        return repo.commit(message, opts.get('user'), opts.get('date'), match,
                           editor=e, extra=extra)
    node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
    if not node:
        ui.status(_("nothing changed\n"))
        return
    cl = repo.changelog
    rev = cl.rev(node)
    parents = cl.parentrevs(rev)
    # decide whether this commit created a new head, to warn the user
    if rev - 1 in parents:
        # one of the parents was the old tip
        pass
    elif (parents == (nullrev, nullrev) or
          len(cl.heads(cl.node(parents[0]))) > 1 and
          (parents[1] == nullrev or len(cl.heads(cl.node(parents[1]))) > 1)):
        # either a new root, or every parent's branch already had another head
        ui.status(_('created new head\n'))
    if ui.debugflag:
        ui.write(_('committed changeset %d:%s\n') % (rev, hex(node)))
    elif ui.verbose:
        ui.write(_('committed changeset %d:%s\n') % (rev, short(node)))
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit
    Mark dest as having copies of source files. If dest is a
    directory, copies are put in that directory. If dest is a file,
    the source must be a single file.
    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.
    This command takes effect with the next commit. To undo a copy
    before that, see hg revert.
    """
    # NOTE(review): wlock(False) presumably means a non-waiting lock
    # acquisition -- confirm against localrepo.wlock's signature
    wlock = repo.wlock(False)
    try:
        # all the real work happens in cmdutil.copy
        return cmdutil.copy(ui, repo, pats, opts)
    finally:
        wlock.release()
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    argc = len(args)
    if argc == 3:
        # an explicit index file was given: open it directly
        index, rev1, rev2 = args
        rlog = revlog.revlog(util.opener(os.getcwd(), audit=False), index)
        lookup = rlog.lookup
    elif argc == 2:
        # no index file: fall back to the current repository's changelog
        if not repo:
            raise util.Abort(_("There is no Mercurial repository here "
                               "(.hg not found)"))
        rev1, rev2 = args
        rlog = repo.changelog
        lookup = repo.lookup
    else:
        raise util.Abort(_('either two or three arguments required'))
    anc = rlog.ancestor(lookup(rev1), lookup(rev2))
    ui.write("%d:%s\n" % (rlog.rev(anc), hex(anc)))
def debugcommands(ui, cmd='', *args):
    """print every command with its option names (debugging aid)"""
    for name, entry in sorted(table.iteritems()):
        # strip alias list and the '^' shortlist marker from the key
        primary = name.split('|')[0].strip('^')
        optnames = ', '.join(o[1] for o in entry[1])
        ui.write('%s: %s\n' % (primary, optnames))
def debugcomplete(ui, cmd='', **opts):
    """returns the completion list associated with the given command"""
    if opts.get('options'):
        # complete option names (global ones plus, if a command is given,
        # that command's own options) instead of command names
        flags = []
        tables = [globalopts]
        if cmd:
            aliases, entry = cmdutil.findcmd(cmd, table, False)
            tables.append(entry[1])
        for tbl in tables:
            for opt in tbl:
                if opt[0]:
                    flags.append('-%s' % opt[0])
                flags.append('--%s' % opt[1])
        ui.write("%s\n" % "\n".join(flags))
        return
    cmdlist = cmdutil.findpossible(cmd, table)
    if ui.verbose:
        # verbose mode lists every alias of each matching command
        cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
    ui.write("%s\n" % "\n".join(sorted(cmdlist)))
def debugfsinfo(ui, path = "."):
    """show filesystem capabilities (exec bits, symlinks, case handling)

    Creates a temporary probe file '.debugfsinfo' in the current
    directory; util.checkcase() needs an existing path to test against.
    """
    # Close the probe file explicitly: relying on refcounting leaks the
    # descriptor on non-CPython interpreters, and an open file cannot be
    # unlinked on Windows.
    fp = open('.debugfsinfo', 'w')
    fp.write('')
    fp.close()
    try:
        ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no'))
        ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no'))
        ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo')
                                           and 'yes' or 'no'))
    finally:
        # always remove the probe file, even if a capability check raises
        os.unlink('.debugfsinfo')
def debugrebuildstate(ui, repo, rev="tip"):
    """rebuild the dirstate as it would look like for the given revision"""
    # resolve the revision before taking the lock so a bad rev aborts early
    ctx = repo[rev]
    wlock = repo.wlock()
    try:
        # reset the dirstate to match the target revision's manifest
        repo.dirstate.rebuild(ctx.node(), ctx.manifest())
    finally:
        wlock.release()
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    # cross-check every dirstate entry against the parent manifests;
    # states: 'n' = normal, 'r' = removed, 'a' = added, 'm' = merged
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    # reverse direction: every file in the first parent's manifest must be
    # tracked by the dirstate in an appropriate state
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        error = _(".hg/dirstate inconsistent with current parent's manifest")
        raise util.Abort(error)
def showconfig(ui, repo, *values, **opts):
    """show combined config settings from all hgrc files
    With no arguments, print names and values of all config items.
    With one argument of the form section.name, print just the value
    of that config item.
    With multiple arguments, print names and values of all config
    items with matching section names.
    With --debug, the source (filename and line number) is printed
    for each config item.
    """
    untrusted = bool(opts.get('untrusted'))
    if values:
        # at most one dotted 'section.name' selector is allowed
        if len([v for v in values if '.' in v]) > 1:
            raise util.Abort(_('only one config item permitted'))
    for section, name, value in ui.walkconfig(untrusted=untrusted):
        sectname = section + '.' + name
        if values:
            for v in values:
                if v == section:
                    # bare section name: print every item in the section
                    ui.debug('%s: ' %
                             ui.configsource(section, name, untrusted))
                    ui.write('%s=%s\n' % (sectname, value))
                elif v == sectname:
                    # exact section.name match: print the value only
                    ui.debug('%s: ' %
                             ui.configsource(section, name, untrusted))
                    ui.write(value, '\n')
        else:
            # no selectors: dump everything
            ui.debug('%s: ' %
                     ui.configsource(section, name, untrusted))
            ui.write('%s=%s\n' % (sectname, value))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care.
    """
    # a missing second parent defaults to the null revision
    second = rev2 or hex(nullid)
    lock = repo.wlock()
    try:
        repo.dirstate.setparents(repo.lookup(rev1), repo.lookup(second))
    finally:
        lock.release()
def debugstate(ui, repo, nodates=None):
    """show the contents of the current dirstate"""
    timestr = ""
    showdate = not nodates
    # each entry is indexed as (state, mode, size, mtime)
    for file_, ent in sorted(repo.dirstate._map.iteritems()):
        if showdate:
            if ent[3] == -1:
                # mtime of -1 means "unset"
                # Pad or slice to locale representation
                locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ",
                                               time.localtime(0)))
                timestr = 'unset'
                timestr = (timestr[:locale_len] +
                           ' ' * (locale_len - len(timestr)))
            else:
                timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
                                        time.localtime(ent[3]))
        if ent[1] & 020000:
            # symlink bit set in the recorded mode
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0777)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
def debugsub(ui, repo, rev=None):
    """dump the subrepository state recorded at the given revision"""
    # treat an empty string the same as no revision at all
    target = None if rev == '' else rev
    for path, state in sorted(repo[target].substate.items()):
        ui.write('path %s\n' % path)
        ui.write(' source %s\n' % state[0])
        ui.write(' revision %s\n' % state[1])
def debugdata(ui, file_, rev):
    """dump the contents of a data file revision"""
    # map the ".d" data file name onto its ".i" index file
    indexname = file_[:-2] + ".i"
    rlog = revlog.revlog(util.opener(os.getcwd(), audit=False), indexname)
    try:
        ui.write(rlog.revision(rlog.lookup(rev)))
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s') % rev)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # --extended enables the additional set of accepted date formats
    if opts["extended"]:
        parsed = util.parsedate(date, util.extendeddateformats)
    else:
        parsed = util.parsedate(date)
    ui.write("internal: %s %s\n" % parsed)
    ui.write("standard: %s\n" % util.datestr(parsed))
    if range:
        matcher = util.matchdate(range)
        ui.write("match: %s\n" % matcher(parsed[0]))
def debugindex(ui, file_):
    """dump the contents of an index file

    Prints one row per revision: rev number, start offset, length,
    delta base, linkrev, and the short hashes of the node and its two
    parents.
    """
    r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
    ui.write(" rev offset length base linkrev"
             " nodeid p1 p2\n")
    for i in r:
        node = r.node(i)
        try:
            pp = r.parents(node)
        except Exception:
            # a damaged index entry may make parents() fail; display null
            # parents rather than aborting the whole dump.  Catch only
            # Exception (not bare except) so KeyboardInterrupt/SystemExit
            # still propagate.
            pp = [nullid, nullid]
        ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
            i, r.start(i), r.length(i), r.base(i), r.linkrev(i),
            short(node), short(pp[0]), short(pp[1])))
def debugindexdot(ui, file_):
    """dump an index DAG as a graphviz dot file"""
    rlog = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
    ui.write("digraph G {\n")
    for rev in rlog:
        node = rlog.node(rev)
        p1, p2 = rlog.parents(node)
        # emit one edge per parent; a null second parent is omitted
        ui.write("\t%d -> %d\n" % (rlog.rev(p1), rev))
        if p2 != nullid:
            ui.write("\t%d -> %d\n" % (rlog.rev(p2), rev))
    ui.write("}\n")
def debuginstall(ui):
    '''test Mercurial installation'''
    def writetemp(contents):
        # write contents to a fresh temp file and return its path
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, "wb")
        f.write(contents)
        f.close()
        return name
    # probe each subsystem in turn, counting (but not aborting on) failures
    problems = 0
    # encoding
    ui.status(_("Checking encoding (%s)...\n") % encoding.encoding)
    try:
        encoding.fromlocal("test")
    except util.Abort, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (check that your locale is properly set)\n"))
        problems += 1
    # compiled modules
    ui.status(_("Checking extensions...\n"))
    try:
        import bdiff, mpatch, base85
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" One or more extensions could not be found"))
        ui.write(_(" (check that you compiled the extensions)\n"))
        problems += 1
    # templates
    ui.status(_("Checking templates...\n"))
    try:
        import templater
        templater.templater(templater.templatepath("map-cmdline.default"))
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (templates seem to have been installed incorrectly)\n"))
        problems += 1
    # patch
    # apply a tiny known-good diff and verify the result round-trips
    ui.status(_("Checking patch...\n"))
    patchproblems = 0
    a = "1\n2\n3\n4\n"
    b = "1\n2\n3\ninsert\n4\n"
    fa = writetemp(a)
    d = mdiff.unidiff(a, None, b, None, os.path.basename(fa),
                      os.path.basename(fa))
    fd = writetemp(d)
    files = {}
    try:
        patch.patch(fd, ui, cwd=os.path.dirname(fa), files=files)
    except util.Abort, e:
        ui.write(_(" patch call failed:\n"))
        ui.write(" " + str(e) + "\n")
        patchproblems += 1
    else:
        if list(files) != [os.path.basename(fa)]:
            ui.write(_(" unexpected patch output!\n"))
            patchproblems += 1
        a = open(fa).read()
        if a != b:
            ui.write(_(" patch test failed!\n"))
            patchproblems += 1
    if patchproblems:
        if ui.config('ui', 'patch'):
            ui.write(_(" (Current patch tool may be incompatible with patch,"
                       " or misconfigured. Please check your .hgrc file)\n"))
        else:
            ui.write(_(" Internal patcher failure, please report this error"
                       " to http://mercurial.selenic.com/bts/\n"))
    problems += patchproblems
    os.unlink(fa)
    os.unlink(fd)
    # editor
    ui.status(_("Checking commit editor...\n"))
    editor = ui.geteditor()
    # the editor setting may include arguments; try the full string first
    cmdpath = util.find_exe(editor) or util.find_exe(editor.split()[0])
    if not cmdpath:
        if editor == 'vi':
            ui.write(_(" No commit editor set and can't find vi in PATH\n"))
            ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
        else:
            ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
            ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
            problems += 1
    # check username
    ui.status(_("Checking username...\n"))
    try:
        user = ui.username()
    except util.Abort, e:
        ui.write(" %s\n" % e)
        ui.write(_(" (specify a username in your .hgrc file)\n"))
        problems += 1
    if not problems:
        ui.status(_("No problems detected\n"))
    else:
        ui.write(_("%s problems detected,"
                   " please check your install!\n") % problems)
    # the problem count doubles as the command's exit status
    return problems
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""
    ctx = repo[opts.get('rev')]
    matcher = cmdutil.match(repo, (file1,) + pats, opts)
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        # renamed() yields (source path, source node) or a false value
        renamed = fctx.filelog().renamed(fctx.filenode())
        rel = matcher.rel(path)
        if renamed:
            ui.write(_("%s renamed from %s:%s\n") % (rel, renamed[0],
                                                     hex(renamed[1])))
        else:
            ui.write(_("%s not renamed\n") % rel)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    matcher = cmdutil.match(repo, pats, opts)
    files = list(repo.walk(matcher))
    if not files:
        return
    # size the two path columns to their longest entries
    abswidth = max([len(f) for f in files])
    relwidth = max([len(matcher.rel(f)) for f in files])
    fmt = 'f %%-%ds %%-%ds %%s' % (abswidth, relwidth)
    for f in files:
        line = fmt % (f, matcher.rel(f), matcher.exact(f) and 'exact' or '')
        ui.write("%s\n" % line.rstrip())
def diff(ui, repo, *pats, **opts):
    """diff repository (or selected files)
    Show differences between revisions for the specified files.
    Differences between files are shown using the unified diff format.
    NOTE: diff may generate unexpected results for merges, as it will
    default to comparing against the working directory's first parent
    changeset if no revisions are specified.
    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.
    Alternatively you can specify -c/--change with a revision to see
    the changes in that changeset relative to its first parent.
    Without the -a/--text option, diff will avoid generating diffs of
    files it detects as binary. With -a, diff will generate a diff
    anyway, probably with undesirable results.
    Use the -g/--git option to generate diffs in the git extended diff
    format. For more information, read 'hg help diffs'.
    """
    revs = opts.get('rev')
    change = opts.get('change')
    stat = opts.get('stat')
    reverse = opts.get('reverse')
    # resolve the two comparison endpoints (node1, node2)
    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise util.Abort(msg)
    elif change:
        # -c/--change: diff a changeset against its first parent
        node2 = repo.lookup(change)
        node1 = repo[node2].parents()[0].node()
    else:
        node1, node2 = cmdutil.revpair(repo, revs)
    if reverse:
        node1, node2 = node2, node1
    if stat:
        # --stat only needs changed-line counts, not context lines
        opts['unified'] = '0'
    diffopts = patch.diffopts(ui, opts)
    m = cmdutil.match(repo, pats, opts)
    it = patch.diff(repo, node1, node2, match=m, opts=diffopts)
    if stat:
        width = 80
        if not ui.plain():
            width = util.termwidth()
        ui.write(patch.diffstat(util.iterlines(it), width=width,
                                git=diffopts.git))
    else:
        for chunk in it:
            ui.write(chunk)
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets

    Print the changeset header and diffs for one or more revisions.
    The header shows: author, date, branch name (if non-default),
    changeset hash, parent(s) and commit comment.

    NOTE: export may generate unexpected diff output for merge
    changesets, as it will compare the merge changeset against its
    first parent only (use --switch-parent to diff against the second
    parent instead).

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are as follows:
    :``%%``: literal "%" character
    :``%H``: changeset hash (40 bytes of hexadecimal)
    :``%N``: number of patches being generated
    :``%R``: changeset revision number
    :``%b``: basename of the exporting repository
    :``%h``: short-form changeset hash (12 bytes of hexadecimal)
    :``%n``: zero-padded sequence number, starting at 1
    :``%r``: zero-padded changeset revision number

    Without the -a/--text option, export will avoid generating diffs
    of files it detects as binary. Use the -g/--git option to generate
    diffs in the git extended diff format (see 'hg help diffs').
    """
    # revisions may be given positionally and/or via -r/--rev
    changesets += tuple(opts.get('rev', []))
    if not changesets:
        raise util.Abort(_("export requires at least one changeset"))
    revlist = cmdutil.revrange(repo, changesets)
    if len(revlist) > 1:
        ui.note(_('exporting patches:\n'))
    else:
        ui.note(_('exporting patch:\n'))
    patch.export(repo, revlist, template=opts.get('output'),
                 switch_parent=opts.get('switch_parent'),
                 opts=patch.diffopts(ui, opts))
def forget(ui, repo, *pats, **opts):
    """forget the specified files on the next commit

    Mark the specified files so they will no longer be tracked after
    the next commit. This only removes files from the current branch,
    not from the entire project history, and it does not delete them
    from the working directory.

    To undo a forget before the next commit, see hg add.
    """
    if not pats:
        raise util.Abort(_('no files specified'))
    matcher = cmdutil.match(repo, pats, opts)
    st = repo.status(match=matcher, clean=True)
    # forget everything matched that is currently tracked
    targets = sorted(st[0] + st[1] + st[3] + st[6])
    for f in matcher.files():
        # warn about explicitly named entries that are not tracked files
        if f not in repo.dirstate and not os.path.isdir(matcher.rel(f)):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % matcher.rel(f))
    for f in targets:
        if ui.verbose or not matcher.exact(f):
            ui.status(_('removing %s\n') % matcher.rel(f))
    repo.remove(targets, unlink=False)
def grep(ui, repo, pattern, *pats, **opts):
    """search for a pattern in specified files and revisions
    Search revisions of files for a regular expression.
    This command behaves differently than Unix grep. It only accepts
    Python/Perl regexps. It searches repository history, not the
    working directory. It always prints the revision number in which a
    match appears.
    By default, grep only prints output for the first revision of a
    file in which it finds a match. To get it to print every revision
    that contains a change in match status ("-" for a match that
    becomes a non-match, or "+" for a non-match that becomes a match),
    use the --all flag.
    """
    reflags = 0
    if opts.get('ignore_case'):
        reflags |= re.I
    try:
        regexp = re.compile(pattern, reflags)
    except Exception, inst:
        ui.warn(_("grep: invalid match pattern: %s\n") % inst)
        return None
    sep, eol = ':', '\n'
    if opts.get('print0'):
        sep = eol = '\0'
    # cache filelog objects; the same file is read at many revisions
    getfile = util.lrucachefunc(repo.file)
    def matchlines(body):
        # yield (linenum, colstart, colend, line) for each regexp match
        begin = 0
        linenum = 0
        while True:
            match = regexp.search(body, begin)
            if not match:
                break
            mstart, mend = match.span()
            linenum += body.count('\n', begin, mstart) + 1
            # rfind/find return -1 on miss, making the +1 result falsy,
            # so 'or' substitutes the fallback value
            lstart = body.rfind('\n', begin, mstart) + 1 or begin
            begin = body.find('\n', mend) + 1 or len(body)
            lend = begin - 1
            yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
    class linestate(object):
        # one matched line; equality/hash compare line text (and number)
        # so match sets can be diffed between revisions
        def __init__(self, line, linenum, colstart, colend):
            self.line = line
            self.linenum = linenum
            self.colstart = colstart
            self.colend = colend
        def __hash__(self):
            return hash((self.linenum, self.line))
        def __eq__(self, other):
            return self.line == other.line
    # matches[rev][fn] -> list of linestate; copies[rev][fn] -> copy source
    matches = {}
    copies = {}
    def grepbody(fn, rev, body):
        # record every matching line of fn at rev
        matches[rev].setdefault(fn, [])
        m = matches[rev][fn]
        for lnum, cstart, cend, line in matchlines(body):
            s = linestate(line, lnum, cstart, cend)
            m.append(s)
    def difflinestates(a, b):
        # yield ('+'/'-', linestate) for match-state changes between a and b
        sm = difflib.SequenceMatcher(None, a, b)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag == 'insert':
                for i in xrange(blo, bhi):
                    yield ('+', b[i])
            elif tag == 'delete':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
            elif tag == 'replace':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
                for i in xrange(blo, bhi):
                    yield ('+', b[i])
    def display(fn, ctx, pstates, states):
        # print the matches for one file at one revision; returns whether
        # anything was printed
        rev = ctx.rev()
        datefunc = ui.quiet and util.shortdate or util.datestr
        found = False
        filerevmatches = {}
        if opts.get('all'):
            iter = difflinestates(pstates, states)
        else:
            iter = [('', l) for l in states]
        for change, l in iter:
            cols = [fn, str(rev)]
            if opts.get('line_number'):
                cols.append(str(l.linenum))
            if opts.get('all'):
                cols.append(change)
            if opts.get('user'):
                cols.append(ui.shortuser(ctx.user()))
            if opts.get('date'):
                cols.append(datefunc(ctx.date()))
            if opts.get('files_with_matches'):
                # only one line of output per (file, rev) pair
                c = (fn, rev)
                if c in filerevmatches:
                    continue
                filerevmatches[c] = 1
            else:
                cols.append(l.line)
            ui.write(sep.join(cols), eol)
            found = True
        return found
    skip = {}
    revfiles = {}
    matchfn = cmdutil.match(repo, pats, opts)
    found = False
    follow = opts.get('follow')
    def prep(ctx, fns):
        # called by walkchangerevs for each revision: collect match data
        # for the revision and its first parent so display() can diff them
        rev = ctx.rev()
        pctx = ctx.parents()[0]
        parent = pctx.rev()
        matches.setdefault(rev, {})
        matches.setdefault(parent, {})
        files = revfiles.setdefault(rev, [])
        for fn in fns:
            flog = getfile(fn)
            try:
                fnode = ctx.filenode(fn)
            except error.LookupError:
                continue
            copied = flog.renamed(fnode)
            copy = follow and copied and copied[0]
            if copy:
                copies.setdefault(rev, {})[fn] = copy
            if fn in skip:
                # already reported at a later revision; propagate the skip
                # to the copy source when following renames
                if copy:
                    skip[copy] = True
                continue
            files.append(fn)
            if fn not in matches[rev]:
                grepbody(fn, rev, flog.read(fnode))
            pfn = copy or fn
            if pfn not in matches[parent]:
                try:
                    fnode = pctx.filenode(pfn)
                    grepbody(pfn, parent, flog.read(fnode))
                except error.LookupError:
                    pass
    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
        rev = ctx.rev()
        parent = ctx.parents()[0].rev()
        for fn in sorted(revfiles.get(rev, [])):
            states = matches[rev][fn]
            copy = copies.get(rev, {}).get(fn)
            if fn in skip:
                if copy:
                    skip[copy] = True
                continue
            pstates = matches.get(parent, {}).get(copy or fn, [])
            if pstates or states:
                r = display(fn, ctx, pstates, states)
                found = found or r
                if r and not opts.get('all'):
                    # without --all, only the newest matching rev per file
                    skip[fn] = True
                    if copy:
                        skip[copy] = True
        # free per-revision data as soon as the revision is processed
        del matches[rev]
        del revfiles[rev]
def heads(ui, repo, *branchrevs, **opts):
    """show current repository heads or show branch heads

    With no arguments, show all repository branch heads.
    Repository "heads" are changesets with no child changesets. They are
    where development generally takes place and are the usual targets
    for update and merge operations. Branch heads are changesets that have
    no child changeset on the same branch.
    If one or more REVs are given, only branch heads on the branches
    associated with the specified changesets are shown.
    If -c/--closed is specified, also show branch heads marked closed
    (see hg commit --close-branch).
    If STARTREV is specified, only those heads that are descendants of
    STARTREV will be displayed.
    If -t/--topo is specified, named branch mechanics will be ignored and only
    changesets without children will be shown.
    """
    if opts.get('rev'):
        start = repo.lookup(opts['rev'])
    else:
        start = None
    if opts.get('topo'):
        # topological heads only: ignore named branches entirely
        heads = [repo[h] for h in repo.heads(start)]
    else:
        # collect branch heads, optionally restricted to descendants of start
        heads = []
        for b, ls in repo.branchmap().iteritems():
            if start is None:
                heads += [repo[h] for h in ls]
                continue
            startrev = repo.changelog.rev(start)
            descendants = set(repo.changelog.descendants(startrev))
            descendants.add(startrev)
            rev = repo.changelog.rev
            heads += [repo[h] for h in ls if rev(h) in descendants]
    if branchrevs:
        # restrict to the branches of the given revisions
        decode, encode = encoding.fromlocal, encoding.tolocal
        branches = set(repo[decode(br)].branch() for br in branchrevs)
        heads = [h for h in heads if h.branch() in branches]
    if not opts.get('closed'):
        heads = [h for h in heads if not h.extra().get('close')]
    if opts.get('active') and branchrevs:
        dagheads = repo.heads(start)
        heads = [h for h in heads if h.node() in dagheads]
    if branchrevs:
        # warn about requested branches for which no head survived the filters
        haveheads = set(h.branch() for h in heads)
        if branches - haveheads:
            headless = ', '.join(encode(b) for b in branches - haveheads)
            msg = _('no open branch heads found on branches %s')
            if opts.get('rev'):
                # translate first, then interpolate: the gettext catalog key
                # must be the literal ' (started at %s)', not the already
                # formatted string (which would never match a translation)
                msg += _(' (started at %s)') % opts['rev']
            ui.warn((msg + '\n') % headless)
    if not heads:
        return 1
    # newest first
    heads = sorted(heads, key=lambda x: -x.rev())
    displayer = cmdutil.show_changeset(ui, repo, opts)
    for ctx in heads:
        displayer.show(ctx)
    displayer.close()
def help_(ui, name=None, with_version=False, unknowncmd=False):
"""show help for a given topic or a help overview
With no arguments, print a list of commands with short help messages.
Given a topic, extension, or command name, print help for that
topic."""
option_lists = []
textwidth = util.termwidth() - 2
def addglobalopts(aliases):
if ui.verbose:
option_lists.append((_("global options:"), globalopts))
if name == 'shortlist':
option_lists.append((_('use "hg help" for the full list '
'of commands'), ()))
else:
if name == 'shortlist':
msg = _('use "hg help" for the full list of commands '
'or "hg -v" for details')
elif aliases:
msg = _('use "hg -v help%s" to show aliases and '
'global options') % (name and " " + name or "")
else:
msg = _('use "hg -v help %s" to show global options') % name
option_lists.append((msg, ()))
def helpcmd(name):
if with_version:
version_(ui)
ui.write('\n')
try:
aliases, entry = cmdutil.findcmd(name, table, strict=unknowncmd)
except error.AmbiguousCommand, inst:
# py3k fix: except vars can't be used outside the scope of the
# except block, nor can be used inside a lambda. python issue4617
prefix = inst.args[0]
select = lambda c: c.lstrip('^').startswith(prefix)
helplist(_('list of commands:\n\n'), select)
return
# check if it's an invalid alias and display its error if it is
if getattr(entry[0], 'badalias', False):
if not unknowncmd:
entry[0](ui)
return
# synopsis
if len(entry) > 2:
if entry[2].startswith('hg'):
ui.write("%s\n" % entry[2])
else:
ui.write('hg %s %s\n' % (aliases[0], entry[2]))
else:
ui.write('hg %s\n' % aliases[0])
# aliases
if not ui.quiet and len(aliases) > 1:
ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))
# description
doc = gettext(entry[0].__doc__)
if not doc:
doc = _("(no help text available)")
if hasattr(entry[0], 'definition'): # aliased command
doc = _('alias for: hg %s\n\n%s') % (entry[0].definition, doc)
if ui.quiet:
doc = doc.splitlines()[0]
keep = ui.verbose and ['verbose'] or []
formatted, pruned = minirst.format(doc, textwidth, keep=keep)
ui.write("\n%s\n" % formatted)
if pruned:
ui.write(_('\nuse "hg -v help %s" to show verbose help\n') % name)
if not ui.quiet:
# options
if entry[1]:
option_lists.append((_("options:\n"), entry[1]))
addglobalopts(False)
    def helplist(header, select=None):
        # Print a listing of commands with their one-line synopses,
        # optionally filtered by the 'select' predicate over the primary
        # (pre-"|") command name.
        h = {}     # primary name -> first line of help text
        cmds = {}  # primary name -> full "name|alias" spec
        for c, e in table.iteritems():
            f = c.split("|", 1)[0]
            if select and not select(f):
                continue
            if (not select and name != 'shortlist' and
                e[0].__module__ != __name__):
                # hide extension commands from the generic listing
                continue
            if name == "shortlist" and not f.startswith("^"):
                # shortlist only shows commands flagged with a leading '^'
                continue
            f = f.lstrip("^")
            if not ui.debugflag and f.startswith("debug"):
                continue
            doc = e[0].__doc__
            if doc and 'DEPRECATED' in doc and not ui.verbose:
                continue
            doc = gettext(doc)
            if not doc:
                doc = _("(no help text available)")
            h[f] = doc.splitlines()[0].rstrip()
            cmds[f] = c.lstrip("^")
        if not h:
            ui.status(_('no commands defined\n'))
            return
        ui.status(header)
        fns = sorted(h)
        # column width for the aligned non-verbose layout
        m = max(map(len, fns))
        for f in fns:
            if ui.verbose:
                commands = cmds[f].replace("|",", ")
                ui.write(" %s:\n %s\n"%(commands, h[f]))
            else:
                ui.write(' %-*s %s\n' % (m, f, util.wrap(h[f], m + 4)))
        if not ui.quiet:
            addglobalopts(True)
def helptopic(name):
for names, header, doc in help.helptable:
if name in names:
break
else:
raise error.UnknownCommand(name)
# description
if not doc:
doc = _("(no help text available)")
if hasattr(doc, '__call__'):
doc = doc()
ui.write("%s\n\n" % header)
ui.write("%s\n" % minirst.format(doc, textwidth, indent=4))
    def helpext(name):
        # Show help for an extension, enabled or merely bundled/disabled.
        try:
            mod = extensions.find(name)
            doc = gettext(mod.__doc__) or _('no help text available')
        except KeyError:
            # not enabled; look it up in the list of disabled extensions
            mod = None
            doc = extensions.disabledext(name)
            if not doc:
                raise error.UnknownCommand(name)
        # first doc line is the summary; the rest (if any) is the body
        if '\n' not in doc:
            head, tail = doc, ""
        else:
            head, tail = doc.split('\n', 1)
        ui.write(_('%s extension - %s\n\n') % (name.split('.')[-1], head))
        if tail:
            ui.write(minirst.format(tail, textwidth))
            ui.status('\n\n')
        if mod:
            try:
                ct = mod.cmdtable
            except AttributeError:
                # extension defines no commands
                ct = {}
            # list only the commands this extension provides
            modcmds = set([c.split('|', 1)[0] for c in ct])
            helplist(_('list of commands:\n\n'), modcmds.__contains__)
        else:
            ui.write(_('use "hg help extensions" for information on enabling '
                       'extensions\n'))
    def helpextcmd(name):
        # 'name' is a command provided by an extension that is currently
        # disabled; tell the user which extension provides it.
        cmd, ext, mod = extensions.disabledcmd(name, ui.config('ui', 'strict'))
        doc = gettext(mod.__doc__).splitlines()[0]
        msg = help.listexts(_("'%s' is provided by the following "
                              "extension:") % cmd, {ext: doc}, len(ext),
                            indent=4)
        ui.write(minirst.format(msg, textwidth))
        ui.write('\n\n')
        ui.write(_('use "hg help extensions" for information on enabling '
                   'extensions\n'))
if name and name != 'shortlist':
i = None
if unknowncmd:
queries = (helpextcmd,)
else:
queries = (helptopic, helpcmd, helpext, helpextcmd)
for f in queries:
try:
f(name)
i = None
break
except error.UnknownCommand, inst:
i = inst
if i:
raise i
else:
# program name
if ui.verbose or with_version:
version_(ui)
else:
ui.status(_("Mercurial Distributed SCM\n"))
ui.status('\n')
# list of commands
if name == "shortlist":
header = _('basic commands:\n\n')
else:
header = _('list of commands:\n\n')
helplist(header)
if name != 'shortlist':
exts, maxlength = extensions.enabled()
text = help.listexts(_('enabled extensions:'), exts, maxlength)
if text:
ui.write("\n%s\n" % minirst.format(text, textwidth))
# list all option lists
opt_output = []
for title, options in option_lists:
opt_output.append(("\n%s" % title, None))
for shortopt, longopt, default, desc in options:
if _("DEPRECATED") in desc and not ui.verbose:
continue
opt_output.append(("%2s%s" % (shortopt and "-%s" % shortopt,
longopt and " --%s" % longopt),
"%s%s" % (desc,
default
and _(" (default: %s)") % default
or "")))
if not name:
ui.write(_("\nadditional help topics:\n\n"))
topics = []
for names, header, doc in help.helptable:
topics.append((sorted(names, key=len, reverse=True)[0], header))
topics_len = max([len(s[0]) for s in topics])
for t, desc in topics:
ui.write(" %-*s %s\n" % (topics_len, t, desc))
if opt_output:
opts_len = max([len(line[0]) for line in opt_output if line[1]] or [0])
for first, second in opt_output:
if second:
second = util.wrap(second, opts_len + 3)
ui.write(" %-*s %s\n" % (opts_len, first, second))
else:
ui.write("%s\n" % first)
def identify(ui, repo, source=None,
             rev=None, num=None, id=None, branch=None, tags=None):
    """identify the working copy or specified revision
    With no revision, print a summary of the current state of the
    repository.
    Specifying a path to a repository root or Mercurial bundle will
    cause lookup to operate on that repository/bundle.
    This summary identifies the repository state using one or two
    parent hash identifiers, followed by a "+" if there are
    uncommitted changes in the working directory, a list of tags for
    this revision and a branch name for non-default branches.
    """
    if not repo and not source:
        raise util.Abort(_("There is no Mercurial repository here "
                           "(.hg not found)"))
    hexfunc = ui.debugflag and hex or short
    # no selector flags given: print the default id/branch/tags summary
    default = not (num or id or branch or tags)
    output = []
    revs = []
    if source:
        # operate on another repository/bundle instead of the local one
        source, branches = hg.parseurl(ui.expandpath(source))
        repo = hg.repository(ui, source)
        revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
    if not repo.local():
        if not rev and revs:
            rev = revs[0]
        if not rev:
            rev = "tip"
        if num or branch or tags:
            # fix: mark the message for translation, consistent with the
            # other abort messages in this function
            raise util.Abort(
                _("can't query remote revision number, branch, or tags"))
        output = [hexfunc(repo.lookup(rev))]
    elif not rev:
        # working directory: show parent hash(es), with "+" when there
        # are uncommitted changes
        ctx = repo[None]
        parents = ctx.parents()
        changed = False
        if default or id or num:
            changed = util.any(repo.status())
        if default or id:
            output = ["%s%s" % ('+'.join([hexfunc(p.node()) for p in parents]),
                                (changed) and "+" or "")]
        if num:
            output.append("%s%s" % ('+'.join([str(p.rev()) for p in parents]),
                                    (changed) and "+" or ""))
    else:
        ctx = repo[rev]
        if default or id:
            output = [hexfunc(ctx.node())]
        if num:
            output.append(str(ctx.rev()))
    if repo.local() and default and not ui.quiet:
        b = encoding.tolocal(ctx.branch())
        if b != 'default':
            output.append("(%s)" % b)
        # multiple tags for a single parent separated by '/'
        t = "/".join(ctx.tags())
        if t:
            output.append(t)
    if branch:
        output.append(encoding.tolocal(ctx.branch()))
    if tags:
        output.extend(ctx.tags())
    ui.write("%s\n" % ' '.join(output))
def import_(ui, repo, patch1, *patches, **opts):
    """import an ordered set of patches
    Import a list of patches and commit them individually (unless
    --no-commit is specified).
    If there are outstanding changes in the working directory, import
    will abort unless given the -f/--force flag.
    You can import a patch straight from a mail message. Even patches
    as attachments work (to use the body part, it must have type
    text/plain or text/x-patch). From and Subject headers of email
    message are used as default committer and commit message. All
    text/plain body parts before first diff are added to commit
    message.
    If the imported patch was generated by hg export, user and
    description from patch override values from message headers and
    body. Values given on command line with -m/--message and -u/--user
    override these.
    If --exact is specified, import will set the working directory to
    the parent of each patch before applying it, and will abort if the
    resulting changeset has a different ID than the one recorded in
    the patch. This may happen due to character set problems or other
    deficiencies in the text patch format.
    With -s/--similarity, hg will attempt to discover renames and
    copies in the patch in the same way as 'addremove'.
    To read a patch from standard input, use "-" as the patch name. If
    a URL is specified, the patch will be downloaded from it.
    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    patches = (patch1,) + patches
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    try:
        sim = float(opts.get('similarity') or 0)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if sim < 0 or sim > 100:
        raise util.Abort(_('similarity must be between 0 and 100'))
    if opts.get('exact') or not opts.get('force'):
        cmdutil.bail_if_changed(repo)
    d = opts["base"]
    strip = opts["strip"]
    wlock = lock = None
    def tryone(ui, hunk):
        # Apply a single extracted patch; returns the short id of the new
        # commit, the phrase 'to working directory' for --no-commit, or
        # None when the hunk contained no usable patch.
        tmpname, message, user, date, branch, nodeid, p1, p2 = \
            patch.extract(ui, hunk)
        if not tmpname:
            return None
        commitid = _('to working directory')
        try:
            cmdline_message = cmdutil.logmessage(opts)
            if cmdline_message:
                # pickup the cmdline msg
                message = cmdline_message
            elif message:
                # pickup the patch msg
                message = message.strip()
            else:
                # launch the editor
                message = None
            ui.debug('message:\n%s\n' % message)
            wp = repo.parents()
            if opts.get('exact'):
                if not nodeid or not p1:
                    raise util.Abort(_('not a Mercurial patch'))
                p1 = repo.lookup(p1)
                p2 = repo.lookup(p2 or hex(nullid))
                # move the working directory to the recorded parent
                # before applying the patch
                if p1 != wp[0].node():
                    hg.clean(repo, p1)
                repo.dirstate.setparents(p1, p2)
            elif p2:
                try:
                    p1 = repo.lookup(p1)
                    p2 = repo.lookup(p2)
                    # only record both parents if the first one matches
                    # the current working directory parent
                    if p1 == wp[0].node():
                        repo.dirstate.setparents(p1, p2)
                except error.RepoError:
                    pass
            if opts.get('exact') or opts.get('import_branch'):
                repo.dirstate.setbranch(branch or 'default')
            files = {}
            try:
                patch.patch(tmpname, ui, strip=strip, cwd=repo.root,
                            files=files, eolmode=None)
            finally:
                # record added/removed files even when patching failed
                files = patch.updatedir(ui, repo, files,
                                        similarity=sim / 100.0)
            if not opts.get('no_commit'):
                if opts.get('exact'):
                    m = None
                else:
                    m = cmdutil.matchfiles(repo, files or [])
                n = repo.commit(message, opts.get('user') or user,
                                opts.get('date') or date, match=m,
                                editor=cmdutil.commiteditor)
                if opts.get('exact'):
                    if hex(n) != nodeid:
                        # result differs from the recorded changeset id:
                        # undo the commit and abort
                        repo.rollback()
                        raise util.Abort(_('patch is damaged'
                                           ' or loses information'))
                # Force a dirstate write so that the next transaction
                # backups an up-do-date file.
                repo.dirstate.write()
                if n:
                    commitid = short(n)
            return commitid
        finally:
            os.unlink(tmpname)
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        lastcommit = None
        for p in patches:
            pf = os.path.join(d, p)
            if pf == '-':
                ui.status(_("applying patch from stdin\n"))
                pf = sys.stdin
            else:
                ui.status(_("applying %s\n") % p)
                pf = url.open(ui, pf)
            haspatch = False
            for hunk in patch.split(pf):
                commitid = tryone(ui, hunk)
                if commitid:
                    haspatch = True
                    if lastcommit:
                        ui.status(_('applied %s\n') % lastcommit)
                    lastcommit = commitid
            if not haspatch:
                raise util.Abort(_('no diffs found'))
    finally:
        release(lock, wlock)
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source
    Show new changesets found in the specified path/URL or the default
    pull location. These are the changesets that would have been pulled
    if a pull at the time you issued this command.
    For remote repository, using --bundle avoids downloading the
    changesets twice if the incoming is followed by a pull.
    See pull for valid source format details.
    """
    limit = cmdutil.loglimit(opts)
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
    other = hg.repository(cmdutil.remoteui(repo, opts), source)
    ui.status(_('comparing with %s\n') % url.hidepassword(source))
    revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
    if revs:
        revs = [other.lookup(rev) for rev in revs]
    common, incoming, rheads = repo.findcommonincoming(other, heads=revs,
                                                       force=opts["force"])
    if not incoming:
        # nothing new: best-effort removal of any stale --bundle file.
        # fix: narrow the historical bare "except:" to OSError so real
        # programming errors are no longer silently swallowed
        try:
            os.unlink(opts["bundle"])
        except OSError:
            pass
        ui.status(_("no changes found\n"))
        return 1
    cleanup = None
    try:
        fname = opts["bundle"]
        if fname or not other.local():
            # create a bundle (uncompressed if other repo is not local)
            if revs is None and other.capable('changegroupsubset'):
                revs = rheads
            if revs is None:
                cg = other.changegroup(incoming, "incoming")
            else:
                cg = other.changegroupsubset(incoming, revs, 'incoming')
            bundletype = other.local() and "HG10BZ" or "HG10UN"
            fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
            # keep written bundle?
            if opts["bundle"]:
                cleanup = None
            if not other.local():
                # use the created uncompressed bundlerepo
                other = bundlerepo.bundlerepository(ui, repo.root, fname)
        o = other.changelog.nodesbetween(incoming, revs)[0]
        if opts.get('newest_first'):
            o.reverse()
        displayer = cmdutil.show_changeset(ui, other, opts)
        count = 0
        for n in o:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
        displayer.close()
    finally:
        # close the (possibly temporary bundle) repo and remove any
        # bundle file we created but were not asked to keep
        if hasattr(other, 'close'):
            other.close()
        if cleanup:
            os.unlink(cleanup)
def init(ui, dest=".", **opts):
    """create a new repository in the given directory
    Initialize a new repository in the given directory. If the given
    directory does not exist, it will be created.
    If no directory is given, the current directory is used.
    It is possible to specify an ``ssh://`` URL as the destination.
    See 'hg help urls' for more information.
    """
    # remoteui folds any --ssh/--remotecmd options into the ui object so
    # that ssh:// destinations work
    remote = cmdutil.remoteui(ui, opts)
    hg.repository(remote, dest, create=1)
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns
    Print files under Mercurial control in the working directory whose
    names match the given patterns.
    By default, this command searches all directories in the working
    directory. To search just the current directory and its
    subdirectories, use "--include .".
    If no patterns are given to match, this command prints the names
    of all files under Mercurial control in the working directory.
    If you want to feed the output of this command into the "xargs"
    command, use the -0 option to both this command and "xargs". This
    will avoid the problem of "xargs" treating single filenames that
    contain whitespace as multiple filenames.
    """
    # -0/--print0 terminates entries with NUL for xargs -0
    if opts.get('print0'):
        end = '\0'
    else:
        end = '\n'
    rev = opts.get('rev') or None
    ret = 1
    m = cmdutil.match(repo, pats, opts, default='relglob')
    # silence complaints about patterns that match nothing
    m.bad = lambda f, msg: False
    for abs in repo[rev].walk(m):
        if not rev and abs not in repo.dirstate:
            # present on disk but not tracked
            continue
        if opts.get('fullpath'):
            out = repo.wjoin(abs)
        elif pats:
            out = m.rel(abs) or abs
        else:
            out = abs
        ui.write(out, end)
        ret = 0
    return ret
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files
    Print the revision history of the specified files or the entire
    project.
    File history is shown without following rename or copy history of
    files. Use -f/--follow with a filename to follow history across
    renames and copies. --follow without a filename will only show
    ancestors or descendants of the starting revision. --follow-first
    only follows the first parent of merge revisions.
    If no revision range is specified, the default is tip:0 unless
    --follow is set, in which case the working directory parent is
    used as the starting revision.
    See 'hg help dates' for a list of formats valid for -d/--date.
    By default this command prints revision number and changeset id,
    tags, non-trivial parents, user, date and time, and a summary for
    each commit. When the -v/--verbose switch is used, the list of
    changed files and full commit message are shown.
    NOTE: log -p/--patch may generate unexpected diff output for merge
    changesets, as it will only compare the merge changeset against
    its first parent. Also, only files different from BOTH parents
    will appear in files:.
    """
    matchfn = cmdutil.match(repo, pats, opts)
    limit = cmdutil.loglimit(opts)
    count = 0
    endrev = None
    if opts.get('copies') and opts.get('rev'):
        # only need rename data up to the highest requested revision
        endrev = max(cmdutil.revrange(repo, opts.get('rev'))) + 1
    df = False
    if opts["date"]:
        df = util.matchdate(opts["date"])
    displayer = cmdutil.show_changeset(ui, repo, opts, True, matchfn)
    def prep(ctx, fns):
        # Per-changeset filter: returning without calling displayer.show
        # drops the changeset from the output.
        rev = ctx.rev()
        parents = [p for p in repo.changelog.parentrevs(rev)
                   if p != nullrev]
        if opts.get('no_merges') and len(parents) == 2:
            return
        if opts.get('only_merges') and len(parents) != 2:
            return
        if opts.get('only_branch') and ctx.branch() not in opts['only_branch']:
            return
        if df and not df(ctx.date()[0]):
            return
        if opts['user'] and not [k for k in opts['user'] if k in ctx.user()]:
            return
        if opts.get('keyword'):
            # keep the changeset only if some keyword matches the user,
            # description or file list (case-insensitively)
            for k in [kw.lower() for kw in opts['keyword']]:
                if (k in ctx.user().lower() or
                    k in ctx.description().lower() or
                    k in " ".join(ctx.files()).lower()):
                    break
            else:
                return
        copies = None
        if opts.get('copies') and rev:
            # collect (file, source) rename pairs for the templater
            copies = []
            getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
            for fn in ctx.files():
                rename = getrenamed(fn, rev)
                if rename:
                    copies.append((fn, rename[0]))
        displayer.show(ctx, copies=copies)
    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
        if count == limit:
            break
        if displayer.flush(ctx.rev()):
            count += 1
    displayer.close()
def manifest(ui, repo, node=None, rev=None):
    """output the current or given revision of the project manifest
    Print a list of version controlled files for the given revision.
    If no revision is given, the first parent of the working directory
    is used, or the null revision if no revision is checked out.
    With -v, print file permissions, symlink and executable bits.
    With --debug, print file revision hashes.
    """
    if rev and node:
        raise util.Abort(_("please specify just one revision"))
    if not node:
        node = rev
    # manifest flag character -> mode/decoration prefix printed with -v
    decor = {'l':'644 @ ', 'x':'755 * ', '':'644 '}
    ctx = repo[node]
    for f in ctx:
        if ui.debugflag:
            # --debug: prefix each name with its file revision hash
            ui.write("%40s " % hex(ctx.manifest()[f]))
        if ui.verbose:
            ui.write(decor[ctx.flags(f)])
        ui.write("%s\n" % f)
def merge(ui, repo, node=None, **opts):
    """merge working directory with another revision
    The current working directory is updated with all changes made in
    the requested revision since the last common predecessor revision.
    Files that changed between either parent are marked as changed for
    the next commit and a commit must be performed before any further
    updates to the repository are allowed. The next commit will have
    two parents.
    If no revision is specified, the working directory's parent is a
    head revision, and the current branch contains exactly one other
    head, the other head is merged with by default. Otherwise, an
    explicit revision with which to merge with must be provided.
    """
    if opts.get('rev') and node:
        raise util.Abort(_("please specify just one revision"))
    if not node:
        node = opts.get('rev')
    if not node:
        # no revision given: try to pick the single other head on the
        # current branch
        branch = repo.changectx(None).branch()
        bheads = repo.branchheads(branch)
        if len(bheads) > 2:
            ui.warn(_("abort: branch '%s' has %d heads - "
                      "please merge with an explicit rev\n")
                    % (branch, len(bheads)))
            ui.status(_("(run 'hg heads .' to see heads)\n"))
            return False
        parent = repo.dirstate.parents()[0]
        if len(bheads) == 1:
            if len(repo.heads()) > 1:
                # i18n fix: translate the message first, then apply the
                # format argument (was "_('...' % branch)", which looked
                # up the already-formatted string in the catalog)
                ui.warn(_("abort: branch '%s' has one head - "
                          "please merge with an explicit rev\n") % branch)
                ui.status(_("(run 'hg heads' to see all heads)\n"))
                return False
            msg = _('there is nothing to merge')
            if parent != repo.lookup(repo[None].branch()):
                msg = _('%s - use "hg update" instead') % msg
            raise util.Abort(msg)
        if parent not in bheads:
            raise util.Abort(_('working dir not at a head rev - '
                               'use "hg update" or merge with an explicit rev'))
        # merge with whichever head is not the current parent
        node = parent == bheads[0] and bheads[-1] or bheads[0]
    if opts.get('preview'):
        # find nodes that are ancestors of p2 but not of p1
        p1 = repo.lookup('.')
        p2 = repo.lookup(node)
        nodes = repo.changelog.findmissing(common=[p1], heads=[p2])
        displayer = cmdutil.show_changeset(ui, repo, opts)
        for node in nodes:
            displayer.show(repo[node])
        displayer.close()
        return 0
    return hg.merge(repo, node, force=opts.get('force'))
def outgoing(ui, repo, dest=None, **opts):
    """show changesets not found in the destination
    Show changesets not found in the specified destination repository
    or the default push location. These are the changesets that would
    be pushed if a push was requested.
    See pull for details of valid destination formats.
    """
    limit = cmdutil.loglimit(opts)
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        # resolve symbolic revisions locally
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.repository(cmdutil.remoteui(repo, opts), dest)
    ui.status(_('comparing with %s\n') % url.hidepassword(dest))
    outheads = repo.findoutgoing(other, force=opts.get('force'))
    if not outheads:
        ui.status(_("no changes found\n"))
        return 1
    nodes = repo.changelog.nodesbetween(outheads, revs)[0]
    if opts.get('newest_first'):
        nodes.reverse()
    displayer = cmdutil.show_changeset(ui, repo, opts)
    shown = 0
    for node in nodes:
        if limit is not None and shown >= limit:
            break
        # skip merge changesets when --no-merges was given
        parentlist = [p for p in repo.changelog.parents(node) if p != nullid]
        if opts.get('no_merges') and len(parentlist) == 2:
            continue
        shown += 1
        displayer.show(repo[node])
    displayer.close()
def parents(ui, repo, file_=None, **opts):
    """show the parents of the working directory or revision
    Print the working directory's parent revisions. If a revision is
    given via -r/--rev, the parent of that revision will be printed.
    If a file argument is given, the revision in which the file was
    last changed (before the working directory revision or the
    argument to --rev if given) is printed.
    """
    rev = opts.get('rev')
    if rev:
        ctx = repo[rev]
    else:
        ctx = repo[None]
    if file_:
        # a single explicit file name is required (no patterns)
        m = cmdutil.match(repo, (file_,), opts)
        if m.anypats() or len(m.files()) != 1:
            raise util.Abort(_('can only specify an explicit filename'))
        file_ = m.files()[0]
        filenodes = []
        for pctx in ctx.parents():
            if not pctx:
                continue
            try:
                filenodes.append(pctx.filenode(file_))
            except error.LookupError:
                # the file does not exist in this parent
                pass
        if not filenodes:
            raise util.Abort(_("'%s' not found in manifest!") % file_)
        # map each file node to the changeset that introduced it
        fl = repo.file(file_)
        pnodes = [repo.lookup(fl.linkrev(fl.rev(fn))) for fn in filenodes]
    else:
        pnodes = [pctx.node() for pctx in ctx.parents()]
    displayer = cmdutil.show_changeset(ui, repo, opts)
    for n in pnodes:
        if n != nullid:
            displayer.show(repo[n])
    displayer.close()
def paths(ui, repo, search=None):
    """show aliases for remote repositories
    Show definition of symbolic path name NAME. If no name is given,
    show definition of all available names.
    Path names are defined in the [paths] section of /etc/mercurial/hgrc
    and $HOME/.hgrc. If run inside a repository, .hg/hgrc is used, too.
    The path names ``default`` and ``default-push`` have a special
    meaning. When performing a push or pull operation, they are used
    as fallbacks if no location is specified on the command-line.
    When ``default-push`` is set, it will be used for push and
    ``default`` will be used for pull; otherwise ``default`` is used
    as the fallback for both. When cloning a repository, the clone
    source is written as ``default`` in ``.hg/hgrc``. Note that
    ``default`` and ``default-push`` apply to all inbound (e.g. ``hg
    incoming``) and outbound (e.g. ``hg outgoing``, ``hg email`` and
    ``hg bundle``) operations.
    See 'hg help urls' for more information.
    """
    pathitems = ui.configitems("paths")
    if search:
        # look up a single named path; passwords are masked in output
        for pathname, location in pathitems:
            if pathname == search:
                ui.write("%s\n" % url.hidepassword(location))
                return
        ui.warn(_("not found!\n"))
        return 1
    # no name given: dump the whole [paths] section
    for pathname, location in pathitems:
        ui.write("%s = %s\n" % (pathname, url.hidepassword(location)))
def postincoming(ui, repo, modheads, optupdate, checkout):
    # Shared epilogue for pull/unbundle: optionally update the working
    # directory and print a hint about what to do next. 'modheads' is
    # the number of heads added by the incoming changesets.
    if modheads == 0:
        return
    if optupdate:
        # safe to update automatically when no new head was created,
        # the branch still has a single head, or an explicit checkout
        # target was requested
        safe = modheads <= 1 or len(repo.branchheads()) == 1
        if safe or checkout:
            return hg.update(repo, checkout)
        ui.status(_("not updating, since new heads added\n"))
    if modheads > 1:
        ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
    else:
        ui.status(_("(run 'hg update' to get a working copy)\n"))
def pull(ui, repo, source="default", **opts):
    """pull changes from the specified source
    Pull changes from a remote repository to a local one.
    This finds all changes from the repository at the specified path
    or URL and adds them to a local repository (the current one unless
    -R is specified). By default, this does not update the copy of the
    project in the working directory.
    Use hg incoming if you want to see what would have been added by a
    pull at the time you issued this command. If you then decide to
    add those changes to the repository, you should use pull -r X
    where X is the last changeset listed by hg incoming.
    If SOURCE is omitted, the 'default' path will be used.
    See 'hg help urls' for more information.
    """
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
    other = hg.repository(cmdutil.remoteui(repo, opts), source)
    ui.status(_('pulling from %s\n') % url.hidepassword(source))
    revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
    if revs:
        try:
            # resolve symbolic revisions on the remote side
            revs = [other.lookup(rev) for rev in revs]
        except error.CapabilityError:
            err = _("Other repository doesn't support revision lookup, "
                    "so a rev cannot be specified.")
            raise util.Abort(err)
    modheads = repo.pull(other, heads=revs, force=opts.get('force'))
    if checkout:
        # translate the requested branch checkout into a local rev number
        checkout = str(repo.changelog.rev(other.lookup(checkout)))
    return postincoming(ui, repo, modheads, opts.get('update'), checkout)
def push(ui, repo, dest=None, **opts):
    """push changes to the specified destination
    Push changes from the local repository to the specified destination.
    This is the symmetrical operation for pull. It moves changes from
    the current repository to a different one. If the destination is
    local this is identical to a pull in that directory from the
    current one.
    By default, push will refuse to run if it detects the result would
    increase the number of remote heads. This generally indicates the
    user forgot to pull and merge before pushing.
    If -r/--rev is used, the named revision and all its ancestors will
    be pushed to the remote repository.
    Please see 'hg help urls' for important details about ``ssh://``
    URLs. If DESTINATION is omitted, a default path will be used.
    """
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    other = hg.repository(cmdutil.remoteui(repo, opts), dest)
    ui.status(_('pushing to %s\n') % url.hidepassword(dest))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # push subrepos depth-first for coherent ordering
    c = repo['']
    subs = c.substate # only repos that are committed
    for s in sorted(subs):
        if not c.sub(s).push(opts.get('force')):
            # a subrepo push failed; abort the whole push
            return False
    # repo.push returns the number of heads added (0 means failure)
    r = repo.push(other, opts.get('force'), revs=revs)
    return r == 0
def recover(ui, repo):
    """roll back an interrupted transaction
    Recover from an interrupted commit or pull.
    This command tries to fix the repository status after an
    interrupted operation. It should only be necessary when Mercurial
    suggests it.
    """
    if not repo.recover():
        # no interrupted transaction was found to roll back
        return 1
    # a transaction was rolled back; verify repository integrity
    return hg.verify(repo)
def remove(ui, repo, *pats, **opts):
    """remove the specified files on the next commit
    Schedule the indicated files for removal from the repository.
    This only removes files from the current branch, not from the
    entire project history. -A/--after can be used to remove only
    files that have already been deleted, -f/--force can be used to
    force deletion, and -Af can be used to remove files from the next
    revision without deleting them from the working directory.
    The following table details the behavior of remove for different
    file states (columns) and option combinations (rows). The file
    states are Added [A], Clean [C], Modified [M] and Missing [!] (as
    reported by hg status). The actions are Warn, Remove (from branch)
    and Delete (from disk)::
    A C M !
    none W RD W R
    -f R RD RD R
    -A W W W R
    -Af R R R R
    This command schedules the files to be removed at the next commit.
    To undo a remove before that, see hg revert.
    """
    after, force = opts.get('after'), opts.get('force')
    if not pats and not after:
        raise util.Abort(_('no files specified'))
    m = cmdutil.match(repo, pats, opts)
    s = repo.status(match=m, clean=True)
    # status tuple order is (modified, added, removed, deleted, unknown,
    # ignored, clean); only four of the seven lists are needed here
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]
    # warn about explicitly named files that are not tracked at all
    for f in m.files():
        if f not in repo.dirstate and not os.path.isdir(m.rel(f)):
            ui.warn(_('not removing %s: file is untracked\n') % m.rel(f))
    def warn(files, reason):
        # explain why each file is being skipped
        for f in files:
            ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
                    % (m.rel(f), reason))
    # decide which files to delete from disk ('remove') and which to
    # merely stop tracking ('forget'), per the table in the docstring
    if force:
        remove, forget = modified + deleted + clean, added
    elif after:
        remove, forget = deleted, []
        warn(modified + added + clean, _('still exists'))
    else:
        remove, forget = deleted + clean, []
        warn(modified, _('is modified'))
        warn(added, _('has been marked for add'))
    for f in sorted(remove + forget):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))
    repo.forget(forget)
    repo.remove(remove, unlink=not after)
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove
    Mark dest as copies of sources; mark sources for deletion. If dest
    is a directory, copies are put in that directory. If dest is a
    file, there can only be one source.
    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.
    This command takes effect at the next commit. To undo a rename
    before that, see hg revert.
    """
    wlock = repo.wlock(False)
    try:
        # rename is implemented as a copy that also schedules the
        # source files for removal
        result = cmdutil.copy(ui, repo, pats, opts, rename=True)
    finally:
        wlock.release()
    return result
def resolve(ui, repo, *pats, **opts):
    """various operations to help finish a merge
    This command includes several actions that are often useful while
    performing a merge, after running ``merge`` but before running
    ``commit``. (It is only meaningful if your working directory has
    two parents.) It is most relevant for merges with unresolved
    conflicts, which are typically a result of non-interactive merging with
    ``internal:merge`` or a command-line merge tool like ``diff3``.
    The available actions are:
    1) list files that were merged with conflicts (U, for unresolved)
    and without conflicts (R, for resolved): ``hg resolve -l``
    (this is like ``status`` for merges)
    2) record that you have resolved conflicts in certain files:
    ``hg resolve -m [file ...]`` (default: mark all unresolved files)
    3) forget that you have resolved conflicts in certain files:
    ``hg resolve -u [file ...]`` (default: unmark all resolved files)
    4) discard your current attempt(s) at resolving conflicts and
    restart the merge from scratch: ``hg resolve file...``
    (or ``-a`` for all unresolved files)
    Note that Mercurial will not let you commit files with unresolved merge
    conflicts. You must use ``hg resolve -m ...`` before you can commit
    after a conflicting merge.
    """
    all, mark, unmark, show, nostatus = \
        [opts.get(o) for o in 'all mark unmark list no_status'.split()]
    # -l/-m/-u are mutually exclusive actions
    if (show and (mark or unmark)) or (mark and unmark):
        raise util.Abort(_("too many options specified"))
    if pats and all:
        raise util.Abort(_("can't specify --all and patterns"))
    if not (all or pats or show or mark or unmark):
        raise util.Abort(_('no files or directories specified; '
                           'use --all to remerge all files'))
    ms = merge_.mergestate(repo)
    m = cmdutil.match(repo, pats, opts)
    for f in ms:
        if m(f):
            if show:
                # -l: print resolve state, or bare names with --no-status
                if nostatus:
                    ui.write("%s\n" % f)
                else:
                    ui.write("%s %s\n" % (ms[f].upper(), f))
            elif mark:
                ms.mark(f, "r")
            elif unmark:
                ms.mark(f, "u")
            else:
                # default action: re-run the merge for this file
                wctx = repo[None]
                mctx = wctx.parents()[-1]
                # backup pre-resolve (merge uses .orig for its own purposes)
                a = repo.wjoin(f)
                util.copyfile(a, a + ".resolve")
                # resolve file
                ms.resolve(f, wctx, mctx)
                # replace filemerge's .orig file with our resolve file
                util.rename(a + ".resolve", a + ".orig")
def revert(ui, repo, *pats, **opts):
"""restore individual files or directories to an earlier state
(Use update -r to check out earlier revisions, revert does not
change the working directory parents.)
With no revision specified, revert the named files or directories
to the contents they had in the parent of the working directory.
This restores the contents of the affected files to an unmodified
state and unschedules adds, removes, copies, and renames. If the
working directory has two parents, you must explicitly specify a
revision.
Using the -r/--rev option, revert the given files or directories
to their contents as of a specific revision. This can be helpful
to "roll back" some or all of an earlier change. See 'hg help
dates' for a list of formats valid for -d/--date.
Revert modifies the working directory. It does not commit any
changes, or change the parent of the working directory. If you
revert to a revision other than the parent of the working
directory, the reverted files will thus appear modified
afterwards.
If a file has been deleted, it is restored. If the executable mode
of a file was changed, it is reset.
If names are given, all files matching the names are reverted.
If no arguments are given, no files are reverted.
Modified files are saved with a .orig suffix before reverting.
To disable these backups, use --no-backup.
"""
if opts["date"]:
if opts["rev"]:
raise util.Abort(_("you can't specify a revision and a date"))
opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])
if not pats and not opts.get('all'):
raise util.Abort(_('no files or directories specified; '
'use --all to revert the whole repo'))
parent, p2 = repo.dirstate.parents()
if not opts.get('rev') and p2 != nullid:
raise util.Abort(_('uncommitted merge - please provide a '
'specific revision'))
ctx = repo[opts.get('rev')]
node = ctx.node()
mf = ctx.manifest()
if node == parent:
pmf = mf
else:
pmf = None
# need all matching names in dirstate and manifest of target rev,
# so have to walk both. do not print errors if files exist in one
# but not other.
names = {}
wlock = repo.wlock()
try:
# walk dirstate.
m = cmdutil.match(repo, pats, opts)
m.bad = lambda x, y: False
for abs in repo.walk(m):
names[abs] = m.rel(abs), m.exact(abs)
# walk target manifest.
def badfn(path, msg):
if path in names:
return
path_ = path + '/'
for f in names:
if f.startswith(path_):
return
ui.warn("%s: %s\n" % (m.rel(path), msg))
m = cmdutil.match(repo, pats, opts)
m.bad = badfn
for abs in repo[node].walk(m):
if abs not in names:
names[abs] = m.rel(abs), m.exact(abs)
m = cmdutil.matchfiles(repo, names)
changes = repo.status(match=m)[:4]
modified, added, removed, deleted = map(set, changes)
# if f is a rename, also revert the source
cwd = repo.getcwd()
for f in added:
src = repo.dirstate.copied(f)
if src and src not in names and repo.dirstate[src] == 'r':
removed.add(src)
names[src] = (repo.pathto(src, cwd), True)
def removeforget(abs):
if repo.dirstate[abs] == 'a':
return _('forgetting %s\n')
return _('removing %s\n')
revert = ([], _('reverting %s\n'))
add = ([], _('adding %s\n'))
remove = ([], removeforget)
undelete = ([], _('undeleting %s\n'))
disptable = (
# dispatch table:
# file state
# action if in target manifest
# action if not in target manifest
# make backup if in target manifest
# make backup if not in target manifest
(modified, revert, remove, True, True),
(added, revert, remove, True, False),
(removed, undelete, None, False, False),
(deleted, revert, remove, False, False),
)
for abs, (rel, exact) in sorted(names.items()):
mfentry = mf.get(abs)
target = repo.wjoin(abs)
def handle(xlist, dobackup):
xlist[0].append(abs)
if dobackup and not opts.get('no_backup') and util.lexists(target):
bakname = "%s.orig" % rel
ui.note(_('saving current version of %s as %s\n') %
(rel, bakname))
if not opts.get('dry_run'):
util.copyfile(target, bakname)
if ui.verbose or not exact:
msg = xlist[1]
if not isinstance(msg, basestring):
msg = msg(abs)
ui.status(msg % rel)
for table, hitlist, misslist, backuphit, backupmiss in disptable:
if abs not in table:
continue
# file has changed in dirstate
if mfentry:
handle(hitlist, backuphit)
elif misslist is not None:
handle(misslist, backupmiss)
break
else:
if abs not in repo.dirstate:
if mfentry:
handle(add, True)
elif exact:
ui.warn(_('file not managed: %s\n') % rel)
continue
# file has not changed in dirstate
if node == parent:
if exact:
ui.warn(_('no changes needed to %s\n') % rel)
continue
if pmf is None:
# only need parent manifest in this unlikely case,
# so do not read by default
pmf = repo[parent].manifest()
if abs in pmf:
if mfentry:
# if version of file is same in parent and target
# manifests, do nothing
if (pmf[abs] != mfentry or
pmf.flags(abs) != mf.flags(abs)):
handle(revert, False)
else:
handle(remove, False)
if not opts.get('dry_run'):
def checkout(f):
fc = ctx[f]
repo.wwrite(f, fc.data(), fc.flags())
audit_path = util.path_auditor(repo.root)
for f in remove[0]:
if repo.dirstate[f] == 'a':
repo.dirstate.forget(f)
continue
audit_path(f)
try:
util.unlink(repo.wjoin(f))
except OSError:
pass
repo.dirstate.remove(f)
normal = None
if node == parent:
# We're reverting to our parent. If possible, we'd like status
# to report the file as clean. We have to use normallookup for
# merges to avoid losing information about merged/dirty files.
if p2 != nullid:
normal = repo.dirstate.normallookup
else:
normal = repo.dirstate.normal
for f in revert[0]:
checkout(f)
if normal:
normal(f)
for f in add[0]:
checkout(f)
repo.dirstate.add(f)
normal = repo.dirstate.normallookup
if node == parent and p2 == nullid:
normal = repo.dirstate.normal
for f in undelete[0]:
checkout(f)
normal(f)
finally:
wlock.release()
def rollback(ui, repo):
    """roll back the last transaction

    This command should be used with care. There is only one level of
    rollback, and there is no way to undo a rollback. It will also
    restore the dirstate at the time of the last transaction, losing
    any dirstate changes since that time. This command does not alter
    the working directory.

    Transactions are used to encapsulate the effects of all commands
    that create new changesets or propagate existing changesets into a
    repository. For example, the following commands are transactional,
    and their effects can be rolled back:

    - commit
    - import
    - pull
    - push (with this repository as the destination)
    - unbundle

    This command is not intended for use on public repositories. Once
    changes are visible for pull by other users, rolling a transaction
    back locally is ineffective (someone else may already have pulled
    the changes). Furthermore, a race is possible with readers of the
    repository; for example an in-progress pull from the repository
    may fail if a rollback is performed.
    """
    # All undo logic (journal replay, dirstate restore) lives on the
    # repository object; this command is only a thin entry point.
    repo.rollback()
def root(ui, repo):
    """print the root (top) of the current working directory

    Print the root directory of the current repository.
    """
    # emit the absolute repository root followed by a newline
    ui.write("%s\n" % repo.root)
def serve(ui, repo, **opts):
    """export the repository via HTTP

    Start a local HTTP repository browser and pull server. You can use
    this for ad-hoc sharing and browsing of repositories. It is
    recommended to use a real web server to serve a repository for
    longer periods of time.

    Please note that the server does not implement access control.
    This means that, by default, anybody can read from the server and
    nobody can write to it by default. Set the ``web.allow_push``
    option to ``*`` to allow everybody to push to the server. You
    should use a real web server if you need to authenticate users.

    By default, the server logs accesses to stdout and errors to
    stderr. Use the -A/--accesslog and -E/--errorlog options to log to
    files.
    """
    if opts["stdio"]:
        # --stdio: speak the ssh wire protocol over stdin/stdout instead
        # of starting an HTTP server (used by remote ssh:// clients).
        if repo is None:
            raise error.RepoError(_("There is no Mercurial repository here"
                                    " (.hg not found)"))
        s = sshserver.sshserver(ui, repo)
        s.serve_forever()

    # Copy the relevant command-line options into the [web] config
    # section so the server code can read them uniformly.
    baseui = repo and repo.baseui or ui
    optlist = ("name templates style address port prefix ipv6"
               " accesslog errorlog webdir_conf certificate encoding")
    for o in optlist.split():
        if opts.get(o, None):
            baseui.setconfig("web", o, str(opts[o]))
            # keep the repo-local ui in sync when it is a distinct object
            if (repo is not None) and (repo.ui != baseui):
                repo.ui.setconfig("web", o, str(opts[o]))

    # Without a repository, only multi-repo (webdir_conf) serving works.
    if repo is None and not ui.config("web", "webdir_conf"):
        raise error.RepoError(_("There is no Mercurial repository here"
                                " (.hg not found)"))

    class service(object):
        # adapter object consumed by cmdutil.service (handles --daemon)
        def init(self):
            util.set_signal_handler()
            self.httpd = server.create_server(baseui, repo)

            if not ui.verbose:
                return

            # Build a human-readable "listening at ..." banner.
            if self.httpd.prefix:
                prefix = self.httpd.prefix.strip('/') + '/'
            else:
                prefix = ''

            port = ':%d' % self.httpd.port
            if port == ':80':
                # default HTTP port: omit from the displayed URL
                port = ''

            bindaddr = self.httpd.addr
            if bindaddr == '0.0.0.0':
                bindaddr = '*'
            elif ':' in bindaddr: # IPv6
                bindaddr = '[%s]' % bindaddr

            fqaddr = self.httpd.fqaddr
            if ':' in fqaddr:
                fqaddr = '[%s]' % fqaddr
            ui.status(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
                      (fqaddr, port, prefix, bindaddr, self.httpd.port))

        def run(self):
            self.httpd.serve_forever()

    service = service()
    cmdutil.service(opts, initfn=service.init, runfn=service.run)
def status(ui, repo, *pats, **opts):
    """show changed files in the working directory

    Show status of files in the repository. If names are given, only
    files that match are shown. Files that are clean or ignored or
    the source of a copy/move operation, are not listed unless
    -c/--clean, -i/--ignored, -C/--copies or -A/--all are given.
    Unless options described with "show only ..." are given, the
    options -mardu are used.

    Option -q/--quiet hides untracked (unknown and ignored) files
    unless explicitly requested with -u/--unknown or -i/--ignored.

    NOTE: status may appear to disagree with diff if permissions have
    changed or a merge has occurred. The standard diff format does not
    report permission changes and diff only reports changes relative
    to one merge parent.

    If one revision is given, it is used as the base revision.
    If two revisions are given, the differences between them are
    shown. The --change option can also be used as a shortcut to list
    the changed files of a revision from its first parent.

    The codes used to show the status of files are::

      M = modified
      A = added
      R = removed
      C = clean
      ! = missing (deleted by non-hg command, but still tracked)
      ? = not tracked
      I = ignored
        = origin of the previous file listed as A (added)
    """
    # Resolve the pair of revisions to compare.  --change REV compares
    # REV against its first parent; otherwise --rev supplies 0-2 revs.
    revs = opts.get('rev')
    change = opts.get('change')

    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise util.Abort(msg)
    elif change:
        node2 = repo.lookup(change)
        node1 = repo[node2].parents()[0].node()
    else:
        node1, node2 = cmdutil.revpair(repo, revs)

    # paths are printed relative to cwd only when patterns were given
    cwd = (pats and repo.getcwd()) or ''
    end = opts.get('print0') and '\0' or '\n'
    copy = {}
    states = 'modified added removed deleted unknown ignored clean'.split()
    # which state categories to display, from the matching flags
    show = [k for k in states if opts.get(k)]
    if opts.get('all'):
        show += ui.quiet and (states[:4] + ['clean']) or states
    if not show:
        # default: -mard, plus unknown unless --quiet
        show = ui.quiet and states[:4] or states[:5]

    stat = repo.status(node1, node2, cmdutil.match(repo, pats, opts),
                       'ignored' in show, 'clean' in show, 'unknown' in show)
    # pair each state name with its one-letter status code and file list
    changestates = zip(states, 'MAR!?IC', stat)

    if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
        # build the copy/rename map so copy sources can be shown
        ctxn = repo[nullid]
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        added = stat[1]
        if node2 is None:
            added = stat[0] + stat[1] # merged?

        for k, v in copies.copies(repo, ctx1, ctx2, ctxn)[0].iteritems():
            if k in added:
                copy[k] = v
            elif v in added:
                copy[v] = k

    for state, char, files in changestates:
        if state in show:
            format = "%s %%s%s" % (char, end)
            if opts.get('no_status'):
                format = "%%s%s" % end

            for f in files:
                ui.write(format % repo.pathto(f, cwd))
                if f in copy:
                    # NOTE(review): upstream indents the copy-source line
                    # with two spaces for alignment; whitespace may have
                    # been collapsed here - confirm against history.
                    ui.write(' %s%s' % (repo.pathto(copy[f], cwd), end))
def summary(ui, repo, **opts):
    """summarize working directory state

    This generates a brief summary of the working directory state,
    including parents, branch, commit status, and available updates.

    With the --remote option, this will check the default paths for
    incoming and outgoing changes. This can be time-consuming.
    """
    ctx = repo[None]
    parents = ctx.parents()
    pnode = parents[0].node()
    tags = repo.tags()

    # parent lines: "parent: rev:node tags", plus first description line
    for p in parents:
        t = ' '.join([t for t in tags if tags[t] == p.node()])
        if p.rev() == -1:
            if not len(repo):
                t += _(' (empty repository)')
            else:
                t += _(' (no revision checked out)')
        ui.write(_('parent: %d:%s %s\n') % (p.rev(), str(p), t))
        if p.description():
            ui.status(' ' + p.description().splitlines()[0].strip() + '\n')

    # branch line: always shown for non-default branches, verbose-only
    # (ui.status) for the default branch
    branch = ctx.branch()
    bheads = repo.branchheads(branch)
    m = _('branch: %s\n') % branch
    if branch != 'default':
        ui.write(m)
    else:
        ui.status(m)

    # commit line: counts of files per state, plus unresolved merges
    st = list(repo.status(unknown=True))[:6]
    ms = merge_.mergestate(repo)
    st.append([f for f in ms if ms[f] == 'u'])
    labels = [_('%d modified'), _('%d added'), _('%d removed'),
              _('%d deleted'), _('%d unknown'), _('%d ignored'),
              _('%d unresolved')]
    t = []
    for s, l in zip(st, labels):
        if s:
            t.append(l % len(s))
    t = ', '.join(t)
    cleanworkdir = False

    if len(parents) > 1:
        t += _(' (merge)')
    elif branch != parents[0].branch():
        t += _(' (new branch)')
    elif (not st[0] and not st[1] and not st[2]):
        # no modified, added or removed files
        t += _(' (clean)')
        cleanworkdir = True
    elif pnode not in bheads:
        t += _(' (new branch head)')

    if cleanworkdir:
        ui.status(_('commit: %s\n') % t.strip())
    else:
        ui.write(_('commit: %s\n') % t.strip())

    # all ancestors of branch heads - all ancestors of parent = new csets
    # (computed via a per-revision marker array over the whole changelog)
    new = [0] * len(repo)
    cl = repo.changelog
    for a in [cl.rev(n) for n in bheads]:
        new[a] = 1
    for a in cl.ancestors(*[cl.rev(n) for n in bheads]):
        new[a] = 1
    for a in [p.rev() for p in parents]:
        if a >= 0:
            new[a] = 0
    for a in cl.ancestors(*[p.rev() for p in parents]):
        new[a] = 0
    new = sum(new)

    if new == 0:
        ui.status(_('update: (current)\n'))
    elif pnode not in bheads:
        ui.write(_('update: %d new changesets (update)\n') % new)
    else:
        ui.write(_('update: %d new changesets, %d branch heads (merge)\n') %
                 (new, len(bheads)))

    if opts.get('remote'):
        # --remote: probe the default paths for incoming/outgoing csets
        t = []
        source, branches = hg.parseurl(ui.expandpath('default'))
        other = hg.repository(cmdutil.remoteui(repo, {}), source)
        revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
        ui.debug('comparing with %s\n' % url.hidepassword(source))
        repo.ui.pushbuffer()
        common, incoming, rheads = repo.findcommonincoming(other)
        repo.ui.popbuffer()
        if incoming:
            t.append(_('1 or more incoming'))

        dest, branches = hg.parseurl(ui.expandpath('default-push', 'default'))
        revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
        other = hg.repository(cmdutil.remoteui(repo, {}), dest)
        ui.debug('comparing with %s\n' % url.hidepassword(dest))
        repo.ui.pushbuffer()
        o = repo.findoutgoing(other)
        repo.ui.popbuffer()
        o = repo.changelog.nodesbetween(o, None)[0]
        if o:
            t.append(_('%d outgoing') % len(o))

        if t:
            ui.write(_('remote: %s\n') % (', '.join(t)))
        else:
            ui.status(_('remote: (synced)\n'))
def tag(ui, repo, name1, *names, **opts):
    """add one or more tags for the current or given revision

    Name a particular revision using <name>.

    Tags are used to name particular revisions of the repository and are
    very useful to compare different revisions, to go back to significant
    earlier versions or to mark branch points as releases, etc.

    If no revision is given, the parent of the working directory is
    used, or tip if no revision is checked out.

    To facilitate version control, distribution, and merging of tags,
    they are stored as a file named ".hgtags" which is managed
    similarly to other project files and can be hand-edited if
    necessary. The file '.hg/localtags' is used for local tags (not
    shared among repositories).

    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    # default: tag the working directory parent
    rev_ = "."
    names = (name1,) + names
    if len(names) != len(set(names)):
        raise util.Abort(_('tag names must be unique'))
    for n in names:
        # these names are used internally and may never be tags
        if n in ['tip', '.', 'null']:
            raise util.Abort(_('the name \'%s\' is reserved') % n)
    if opts.get('rev') and opts.get('remove'):
        raise util.Abort(_("--rev and --remove are incompatible"))
    if opts.get('rev'):
        rev_ = opts['rev']
    message = opts.get('message')
    if opts.get('remove'):
        # removing a tag: every name must already exist with the
        # matching scope (local vs global)
        expectedtype = opts.get('local') and 'local' or 'global'
        for n in names:
            if not repo.tagtype(n):
                raise util.Abort(_('tag \'%s\' does not exist') % n)
            if repo.tagtype(n) != expectedtype:
                if expectedtype == 'global':
                    raise util.Abort(_('tag \'%s\' is not a global tag') % n)
                else:
                    raise util.Abort(_('tag \'%s\' is not a local tag') % n)
        # a tag pointing at nullid marks the tag as removed
        rev_ = nullid
        if not message:
            # we don't translate commit messages
            message = 'Removed tag %s' % ', '.join(names)
    elif not opts.get('force'):
        for n in names:
            if n in repo.tags():
                raise util.Abort(_('tag \'%s\' already exists '
                                   '(use -f to force)') % n)
    # tagging "." during an uncommitted merge is ambiguous
    if not rev_ and repo.dirstate.parents()[1] != nullid:
        raise util.Abort(_('uncommitted merge - please provide a '
                           'specific revision'))
    r = repo[rev_].node()

    if not message:
        # we don't translate commit messages
        message = ('Added tag %s for changeset %s' %
                   (', '.join(names), short(r)))

    date = opts.get('date')
    if date:
        date = util.parsedate(date)

    repo.tag(names, r, message, opts.get('local'), opts.get('user'), date)
def tags(ui, repo):
"""list repository tags
This lists both regular and local tags. When the -v/--verbose
switch is used, a third column "local" is printed for local tags.
"""
hexfunc = ui.debugflag and hex or short
tagtype = ""
for t, n in reversed(repo.tagslist()):
if ui.quiet:
ui.write("%s\n" % t)
continue
try:
hn = hexfunc(n)
r = "%5d:%s" % (repo.changelog.rev(n), hn)
except error.LookupError:
r = " ?:%s" % hn
else:
spaces = " " * (30 - encoding.colwidth(t))
if ui.verbose:
if repo.tagtype(t) == 'local':
tagtype = " local"
else:
tagtype = ""
ui.write("%s%s %s%s\n" % (t, spaces, r, tagtype))
def tip(ui, repo, **opts):
    """show the tip revision

    The tip revision (usually just called the tip) is the changeset
    most recently added to the repository (and therefore the most
    recently changed head).

    If you have just made a commit, that commit will be the tip. If
    you have just pulled changes from another repository, the tip of
    that repository becomes the current tip. The "tip" tag is special
    and cannot be renamed or assigned to a different changeset.
    """
    # the tip is always the highest-numbered revision in the repo
    tiprev = len(repo) - 1
    displayer = cmdutil.show_changeset(ui, repo, opts)
    displayer.show(repo[tiprev])
    displayer.close()
def unbundle(ui, repo, fname1, *fnames, **opts):
    """apply one or more changegroup files

    Apply one or more compressed changegroup files generated by the
    bundle command.
    """
    lock = repo.lock()
    try:
        # apply each bundle in order; modheads reflects the last one
        for bundlename in (fname1,) + fnames:
            bundlefile = url.open(ui, bundlename)
            gen = changegroup.readbundle(bundlefile, bundlename)
            modheads = repo.addchangegroup(gen, 'unbundle',
                                           'bundle:' + bundlename)
    finally:
        lock.release()
    # report new heads and optionally update the working directory
    return postincoming(ui, repo, modheads, opts.get('update'), None)
def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False):
    """update working directory

    Update the repository's working directory to the specified
    changeset.

    If no changeset is specified, attempt to update to the head of the
    current branch. If this head is a descendant of the working
    directory's parent, update to it, otherwise abort.

    The following rules apply when the working directory contains
    uncommitted changes:

    1. If neither -c/--check nor -C/--clean is specified, and if
       the requested changeset is an ancestor or descendant of
       the working directory's parent, the uncommitted changes
       are merged into the requested changeset and the merged
       result is left uncommitted. If the requested changeset is
       not an ancestor or descendant (that is, it is on another
       branch), the update is aborted and the uncommitted changes
       are preserved.

    2. With the -c/--check option, the update is aborted and the
       uncommitted changes are preserved.

    3. With the -C/--clean option, uncommitted changes are discarded and
       the working directory is updated to the requested changeset.

    Use null as the changeset to remove the working directory (like 'hg
    clone -U').

    If you want to update just one file to an older changeset, use 'hg revert'.

    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    # the positional argument and --rev are interchangeable
    if not rev:
        rev = node

    if check and clean:
        raise util.Abort(_("cannot specify both -c/--check and -C/--clean"))

    if check:
        # we could use dirty() but we can ignore merge and branch trivia
        wc = repo[None]
        if wc.modified() or wc.added() or wc.removed():
            raise util.Abort(_("uncommitted local changes"))

    if date:
        if rev:
            raise util.Abort(_("you can't specify a revision and a date"))
        rev = cmdutil.finddate(ui, repo, date)

    # with --check we have just verified there is nothing to discard,
    # so a clean (overwrite) update is safe in both cases
    if clean or check:
        return hg.clean(repo, rev)
    return hg.update(repo, rev)
def verify(ui, repo):
    """verify the integrity of the repository

    Verify the integrity of the current repository.

    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.
    """
    # the actual checking logic lives in the hg module; the return
    # value is propagated as the command's exit status
    return hg.verify(repo)
def version_(ui):
    """output version and copyright information"""
    # trailing underscore avoids shadowing any 'version' module name
    ui.write(_("Mercurial Distributed SCM (version %s)\n")
             % util.version())
    # copyright notice is verbose-level output (suppressed by --quiet)
    ui.status(_(
        "\nCopyright (C) 2005-2010 Matt Mackall <mpm@selenic.com> and others\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
    ))
# Command options and aliases are listed here, alphabetically

# Each option entry is a fancyopts tuple:
#   (short flag, long flag, default value, help text)

# options accepted by every command, handled before dispatch
globalopts = [
    ('R', 'repository', '',
     _('repository root directory or name of overlay bundle file')),
    ('', 'cwd', '', _('change working directory')),
    ('y', 'noninteractive', None,
     _('do not prompt, assume \'yes\' for any required answers')),
    ('q', 'quiet', None, _('suppress output')),
    ('v', 'verbose', None, _('enable additional output')),
    ('', 'config', [],
     _('set/override config option (use \'section.name=value\')')),
    ('', 'debug', None, _('enable debugging output')),
    ('', 'debugger', None, _('start debugger')),
    ('', 'encoding', encoding.encoding, _('set the charset encoding')),
    ('', 'encodingmode', encoding.encodingmode,
     _('set the charset encoding mode')),
    ('', 'traceback', None, _('always print a traceback on exception')),
    ('', 'time', None, _('time how long the command takes')),
    ('', 'profile', None, _('print command execution profile')),
    ('', 'version', None, _('output version information and exit')),
    ('h', 'help', None, _('display help and exit')),
]

# Shared option fragments below are concatenated into per-command
# option lists in the command table.

dryrunopts = [('n', 'dry-run', None,
               _('do not perform actions, just print output'))]

remoteopts = [
    ('e', 'ssh', '', _('specify ssh command to use')),
    ('', 'remotecmd', '', _('specify hg command to run on the remote side')),
]

walkopts = [
    ('I', 'include', [], _('include names matching the given patterns')),
    ('X', 'exclude', [], _('exclude names matching the given patterns')),
]

commitopts = [
    ('m', 'message', '', _('use <text> as commit message')),
    ('l', 'logfile', '', _('read commit message from <file>')),
]

commitopts2 = [
    ('d', 'date', '', _('record datecode as commit date')),
    ('u', 'user', '', _('record the specified user as committer')),
]

templateopts = [
    ('', 'style', '', _('display using template map file')),
    ('', 'template', '', _('display with template')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '', _('limit number of changes displayed')),
    ('M', 'no-merges', None, _('do not show merges')),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

diffopts2 = [
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('U', 'unified', '', _('number of lines of context to show')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'))
]
table = {
"^add": (add, walkopts + dryrunopts, _('[OPTION]... [FILE]...')),
"addremove":
(addremove, similarityopts + walkopts + dryrunopts,
_('[OPTION]... [FILE]...')),
"^annotate|blame":
(annotate,
[('r', 'rev', '', _('annotate the specified revision')),
('', 'follow', None,
_('follow copies/renames and list the filename (DEPRECATED)')),
('', 'no-follow', None, _("don't follow copies and renames")),
('a', 'text', None, _('treat all files as text')),
('u', 'user', None, _('list the author (long with -v)')),
('f', 'file', None, _('list the filename')),
('d', 'date', None, _('list the date (short with -q)')),
('n', 'number', None, _('list the revision number (default)')),
('c', 'changeset', None, _('list the changeset')),
('l', 'line-number', None,
_('show line number at the first appearance'))
] + walkopts,
_('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...')),
"archive":
(archive,
[('', 'no-decode', None, _('do not pass files through decoders')),
('p', 'prefix', '', _('directory prefix for files in archive')),
('r', 'rev', '', _('revision to distribute')),
('t', 'type', '', _('type of distribution to create')),
] + walkopts,
_('[OPTION]... DEST')),
"backout":
(backout,
[('', 'merge', None,
_('merge with old dirstate parent after backout')),
('', 'parent', '', _('parent to choose when backing out merge')),
('r', 'rev', '', _('revision to backout')),
] + walkopts + commitopts + commitopts2,
_('[OPTION]... [-r] REV')),
"bisect":
(bisect,
[('r', 'reset', False, _('reset bisect state')),
('g', 'good', False, _('mark changeset good')),
('b', 'bad', False, _('mark changeset bad')),
('s', 'skip', False, _('skip testing changeset')),
('c', 'command', '', _('use command to check changeset state')),
('U', 'noupdate', False, _('do not update to target'))],
_("[-gbsr] [-U] [-c CMD] [REV]")),
"branch":
(branch,
[('f', 'force', None,
_('set branch name even if it shadows an existing branch')),
('C', 'clean', None, _('reset branch name to parent branch name'))],
_('[-fC] [NAME]')),
"branches":
(branches,
[('a', 'active', False,
_('show only branches that have unmerged heads')),
('c', 'closed', False,
_('show normal and closed branches'))],
_('[-ac]')),
"bundle":
(bundle,
[('f', 'force', None,
_('run even when the destination is unrelated')),
('r', 'rev', [],
_('a changeset intended to be added to the destination')),
('b', 'branch', [],
_('a specific branch you would like to bundle')),
('', 'base', [],
_('a base changeset assumed to be available at the destination')),
('a', 'all', None, _('bundle all changesets in the repository')),
('t', 'type', 'bzip2', _('bundle compression type to use')),
] + remoteopts,
_('[-f] [-t TYPE] [-a] [-r REV]... [--base REV]... FILE [DEST]')),
"cat":
(cat,
[('o', 'output', '', _('print output to file with formatted name')),
('r', 'rev', '', _('print the given revision')),
('', 'decode', None, _('apply any matching decode filter')),
] + walkopts,
_('[OPTION]... FILE...')),
"^clone":
(clone,
[('U', 'noupdate', None,
_('the clone will include an empty working copy (only a repository)')),
('u', 'updaterev', '',
_('revision, tag or branch to check out')),
('r', 'rev', [],
_('include the specified changeset')),
('b', 'branch', [],
_('clone only the specified branch')),
('', 'pull', None, _('use pull protocol to copy metadata')),
('', 'uncompressed', None,
_('use uncompressed transfer (fast over LAN)')),
] + remoteopts,
_('[OPTION]... SOURCE [DEST]')),
"^commit|ci":
(commit,
[('A', 'addremove', None,
_('mark new/missing files as added/removed before committing')),
('', 'close-branch', None,
_('mark a branch as closed, hiding it from the branch list')),
] + walkopts + commitopts + commitopts2,
_('[OPTION]... [FILE]...')),
"copy|cp":
(copy,
[('A', 'after', None, _('record a copy that has already occurred')),
('f', 'force', None,
_('forcibly copy over an existing managed file')),
] + walkopts + dryrunopts,
_('[OPTION]... [SOURCE]... DEST')),
"debugancestor": (debugancestor, [], _('[INDEX] REV1 REV2')),
"debugcheckstate": (debugcheckstate, [], ''),
"debugcommands": (debugcommands, [], _('[COMMAND]')),
"debugcomplete":
(debugcomplete,
[('o', 'options', None, _('show the command options'))],
_('[-o] CMD')),
"debugdate":
(debugdate,
[('e', 'extended', None, _('try extended date formats'))],
_('[-e] DATE [RANGE]')),
"debugdata": (debugdata, [], _('FILE REV')),
"debugfsinfo": (debugfsinfo, [], _('[PATH]')),
"debugindex": (debugindex, [], _('FILE')),
"debugindexdot": (debugindexdot, [], _('FILE')),
"debuginstall": (debuginstall, [], ''),
"debugrebuildstate":
(debugrebuildstate,
[('r', 'rev', '', _('revision to rebuild to'))],
_('[-r REV] [REV]')),
"debugrename":
(debugrename,
[('r', 'rev', '', _('revision to debug'))],
_('[-r REV] FILE')),
"debugsetparents":
(debugsetparents, [], _('REV1 [REV2]')),
"debugstate":
(debugstate,
[('', 'nodates', None, _('do not display the saved mtime'))],
_('[OPTION]...')),
"debugsub":
(debugsub,
[('r', 'rev', '', _('revision to check'))],
_('[-r REV] [REV]')),
"debugwalk": (debugwalk, walkopts, _('[OPTION]... [FILE]...')),
"^diff":
(diff,
[('r', 'rev', [], _('revision')),
('c', 'change', '', _('change made by revision'))
] + diffopts + diffopts2 + walkopts,
_('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...')),
"^export":
(export,
[('o', 'output', '', _('print output to file with formatted name')),
('', 'switch-parent', None, _('diff against the second parent')),
('r', 'rev', [], _('revisions to export')),
] + diffopts,
_('[OPTION]... [-o OUTFILESPEC] REV...')),
"^forget":
(forget,
[] + walkopts,
_('[OPTION]... FILE...')),
"grep":
(grep,
[('0', 'print0', None, _('end fields with NUL')),
('', 'all', None, _('print all revisions that match')),
('f', 'follow', None,
_('follow changeset history,'
' or file history across copies and renames')),
('i', 'ignore-case', None, _('ignore case when matching')),
('l', 'files-with-matches', None,
_('print only filenames and revisions that match')),
('n', 'line-number', None, _('print matching line numbers')),
('r', 'rev', [], _('only search files changed within revision range')),
('u', 'user', None, _('list the author (long with -v)')),
('d', 'date', None, _('list the date (short with -q)')),
] + walkopts,
_('[OPTION]... PATTERN [FILE]...')),
"heads":
(heads,
[('r', 'rev', '', _('show only heads which are descendants of REV')),
('t', 'topo', False, _('show topological heads only')),
('a', 'active', False,
_('show active branchheads only [DEPRECATED]')),
('c', 'closed', False,
_('show normal and closed branch heads')),
] + templateopts,
_('[-ac] [-r STARTREV] [REV]...')),
"help": (help_, [], _('[TOPIC]')),
"identify|id":
(identify,
[('r', 'rev', '', _('identify the specified revision')),
('n', 'num', None, _('show local revision number')),
('i', 'id', None, _('show global revision id')),
('b', 'branch', None, _('show branch')),
('t', 'tags', None, _('show tags'))],
_('[-nibt] [-r REV] [SOURCE]')),
"import|patch":
(import_,
[('p', 'strip', 1,
_('directory strip option for patch. This has the same '
'meaning as the corresponding patch option')),
('b', 'base', '', _('base path')),
('f', 'force', None,
_('skip check for outstanding uncommitted changes')),
('', 'no-commit', None,
_("don't commit, just update the working directory")),
('', 'exact', None,
_('apply patch to the nodes from which it was generated')),
('', 'import-branch', None,
_('use any branch information in patch (implied by --exact)'))] +
commitopts + commitopts2 + similarityopts,
_('[OPTION]... PATCH...')),
"incoming|in":
(incoming,
[('f', 'force', None,
_('run even if remote repository is unrelated')),
('n', 'newest-first', None, _('show newest record first')),
('', 'bundle', '', _('file to store the bundles into')),
('r', 'rev', [],
_('a remote changeset intended to be added')),
('b', 'branch', [],
_('a specific branch you would like to pull')),
] + logopts + remoteopts,
_('[-p] [-n] [-M] [-f] [-r REV]...'
' [--bundle FILENAME] [SOURCE]')),
"^init":
(init,
remoteopts,
_('[-e CMD] [--remotecmd CMD] [DEST]')),
"locate":
(locate,
[('r', 'rev', '', _('search the repository as it is in REV')),
('0', 'print0', None,
_('end filenames with NUL, for use with xargs')),
('f', 'fullpath', None,
_('print complete paths from the filesystem root')),
] + walkopts,
_('[OPTION]... [PATTERN]...')),
"^log|history":
(log,
[('f', 'follow', None,
_('follow changeset history,'
' or file history across copies and renames')),
('', 'follow-first', None,
_('only follow the first parent of merge changesets')),
('d', 'date', '', _('show revisions matching date spec')),
('C', 'copies', None, _('show copied files')),
('k', 'keyword', [], _('do case-insensitive search for a keyword')),
('r', 'rev', [], _('show the specified revision or range')),
('', 'removed', None, _('include revisions where files were removed')),
('m', 'only-merges', None, _('show only merges')),
('u', 'user', [], _('revisions committed by user')),
('b', 'only-branch', [],
_('show only changesets within the given named branch')),
('P', 'prune', [],
_('do not display revision or any of its ancestors')),
] + logopts + walkopts,
_('[OPTION]... [FILE]')),
"manifest":
(manifest,
[('r', 'rev', '', _('revision to display'))],
_('[-r REV]')),
"^merge":
(merge,
[('f', 'force', None, _('force a merge with outstanding changes')),
('r', 'rev', '', _('revision to merge')),
('P', 'preview', None,
_('review revisions to merge (no merge is performed)'))],
_('[-P] [-f] [[-r] REV]')),
"outgoing|out":
(outgoing,
[('f', 'force', None,
_('run even when the destination is unrelated')),
('r', 'rev', [],
_('a changeset intended to be included in the destination')),
('n', 'newest-first', None, _('show newest record first')),
('b', 'branch', [],
_('a specific branch you would like to push')),
] + logopts + remoteopts,
_('[-M] [-p] [-n] [-f] [-r REV]... [DEST]')),
"parents":
(parents,
[('r', 'rev', '', _('show parents of the specified revision')),
] + templateopts,
_('[-r REV] [FILE]')),
"paths": (paths, [], _('[NAME]')),
"^pull":
(pull,
[('u', 'update', None,
_('update to new branch head if changesets were pulled')),
('f', 'force', None,
_('run even when remote repository is unrelated')),
('r', 'rev', [],
_('a remote changeset intended to be added')),
('b', 'branch', [],
_('a specific branch you would like to pull')),
] + remoteopts,
_('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]')),
"^push":
(push,
[('f', 'force', None, _('force push')),
('r', 'rev', [],
_('a changeset intended to be included in the destination')),
('b', 'branch', [],
_('a specific branch you would like to push')),
] + remoteopts,
_('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]')),
"recover": (recover, []),
"^remove|rm":
(remove,
[('A', 'after', None, _('record delete for missing files')),
('f', 'force', None,
_('remove (and delete) file even if added or modified')),
] + walkopts,
_('[OPTION]... FILE...')),
"rename|mv":
(rename,
[('A', 'after', None, _('record a rename that has already occurred')),
('f', 'force', None,
_('forcibly copy over an existing managed file')),
] + walkopts + dryrunopts,
_('[OPTION]... SOURCE... DEST')),
"resolve":
(resolve,
[('a', 'all', None, _('select all unresolved files')),
('l', 'list', None, _('list state of files needing merge')),
('m', 'mark', None, _('mark files as resolved')),
('u', 'unmark', None, _('unmark files as resolved')),
('n', 'no-status', None, _('hide status prefix'))]
+ walkopts,
_('[OPTION]... [FILE]...')),
"revert":
(revert,
[('a', 'all', None, _('revert all changes when no arguments given')),
('d', 'date', '', _('tipmost revision matching date')),
('r', 'rev', '', _('revert to the specified revision')),
('', 'no-backup', None, _('do not save backup copies of files')),
] + walkopts + dryrunopts,
_('[OPTION]... [-r REV] [NAME]...')),
"rollback": (rollback, []),
"root": (root, []),
"^serve":
(serve,
[('A', 'accesslog', '', _('name of access log file to write to')),
('d', 'daemon', None, _('run server in background')),
('', 'daemon-pipefds', '', _('used internally by daemon mode')),
('E', 'errorlog', '', _('name of error log file to write to')),
('p', 'port', 0, _('port to listen on (default: 8000)')),
('a', 'address', '',
_('address to listen on (default: all interfaces)')),
('', 'prefix', '',
_('prefix path to serve from (default: server root)')),
('n', 'name', '',
_('name to show in web pages (default: working directory)')),
('', 'webdir-conf', '', _('name of the webdir config file'
' (serve more than one repository)')),
('', 'pid-file', '', _('name of file to write process ID to')),
('', 'stdio', None, _('for remote clients')),
('t', 'templates', '', _('web templates to use')),
('', 'style', '', _('template style to use')),
('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
('', 'certificate', '', _('SSL certificate file'))],
_('[OPTION]...')),
"showconfig|debugconfig":
(showconfig,
[('u', 'untrusted', None, _('show untrusted configuration options'))],
_('[-u] [NAME]...')),
"^summary|sum":
(summary,
[('', 'remote', None, _('check for push and pull'))], '[--remote]'),
"^status|st":
(status,
[('A', 'all', None, _('show status of all files')),
('m', 'modified', None, _('show only modified files')),
('a', 'added', None, _('show only added files')),
('r', 'removed', None, _('show only removed files')),
('d', 'deleted', None, _('show only deleted (but tracked) files')),
('c', 'clean', None, _('show only files without changes')),
('u', 'unknown', None, _('show only unknown (not tracked) files')),
('i', 'ignored', None, _('show only ignored files')),
('n', 'no-status', None, _('hide status prefix')),
('C', 'copies', None, _('show source of copied files')),
('0', 'print0', None,
_('end filenames with NUL, for use with xargs')),
('', 'rev', [], _('show difference from revision')),
('', 'change', '', _('list the changed files of a revision')),
] + walkopts,
_('[OPTION]... [FILE]...')),
"tag":
(tag,
[('f', 'force', None, _('replace existing tag')),
('l', 'local', None, _('make the tag local')),
('r', 'rev', '', _('revision to tag')),
('', 'remove', None, _('remove a tag')),
# -l/--local is already there, commitopts cannot be used
('m', 'message', '', _('use <text> as commit message')),
] + commitopts2,
_('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...')),
"tags": (tags, [], ''),
"tip":
(tip,
[('p', 'patch', None, _('show patch')),
('g', 'git', None, _('use git extended diff format')),
] + templateopts,
_('[-p] [-g]')),
"unbundle":
(unbundle,
[('u', 'update', None,
_('update to new branch head if changesets were unbundled'))],
_('[-u] FILE...')),
"^update|up|checkout|co":
(update,
[('C', 'clean', None, _('discard uncommitted changes (no backup)')),
('c', 'check', None, _('check for uncommitted changes')),
('d', 'date', '', _('tipmost revision matching date')),
('r', 'rev', '', _('revision'))],
_('[-c] [-C] [-d DATE] [[-r] REV]')),
"verify": (verify, []),
"version": (version_, []),
}
# Space-separated command lists consumed by the dispatcher: commands that
# never need a repository, and commands for which a repository is optional.
norepo = " ".join([
    "clone", "init", "version", "help", "debugcommands", "debugcomplete",
    "debugdata", "debugindex", "debugindexdot", "debugdate", "debuginstall",
    "debugfsinfo",
])
optionalrepo = "identify paths serve showconfig debugancestor"
| joewalnes/idea-community | plugins/hg4idea/testData/bin/mercurial/commands.py | Python | apache-2.0 | 143,852 | [
"VisIt"
] | 9fed6cf127ed48dd6d5f74da6a30d2a2fe80227f558cfce10c3e0456632cc8fe |
"""Core, non-language-specific features of DXR, implemented as a Plugin"""
from base64 import b64encode
from itertools import chain
from os.path import relpath, splitext, realpath, basename
import re
from flask import url_for
from funcy import identity
from jinja2 import Markup
from parsimonious import ParseError
from dxr.es import (UNINDEXED_STRING, UNANALYZED_STRING, UNINDEXED_INT,
UNINDEXED_LONG)
from dxr.exceptions import BadTerm
from dxr.filters import Filter, negatable, FILE, LINE
import dxr.indexers
from dxr.mime import is_binary_image, is_textual_image
from dxr.query import some_filters
from dxr.plugins import direct_search
from dxr.trigrammer import (regex_grammar, NGRAM_LENGTH, es_regex_filter,
NoTrigrams, PythonRegexVisitor)
from dxr.utils import glob_to_regex
__all__ = ['mappings', 'analyzers', 'TextFilter', 'PathFilter', 'FilenameFilter',
'ExtFilter', 'RegexpFilter', 'IdFilter', 'RefFilter']
# ES field mapping shared by the ``path`` and ``file_name`` properties of both
# the FILE and LINE doctypes below.
PATH_SEGMENT_MAPPING = { # some portion of a path/to/a/folder/filename.cpp string
    'type': 'string',
    'index': 'not_analyzed', # support JS source fetching & sorting & browse() lookups
    # Trigram subfields power accelerated wildcard/regex matching against
    # path segments (see _PathSegmentFilterBase):
    'fields': {
        'trigrams_lower': {
            'type': 'string',
            'analyzer': 'trigramalyzer_lower' # accelerate wildcards
        },
        'trigrams': {
            'type': 'string',
            'analyzer': 'trigramalyzer'
        }
    }
}
mappings = {
# We also insert entries here for folders. This gives us folders in dir
# listings and the ability to find matches in folder pathnames.
FILE: {
'_all': {
'enabled': False
},
'properties': {
# FILE filters query this. It supports globbing via JS regex script.
'path': PATH_SEGMENT_MAPPING, # path/to/a/folder/filename.cpp
# Basename of path for fast lookup.
# FILE filters query this. It supports globbing via JS regex script.
'file_name': PATH_SEGMENT_MAPPING, # filename.cpp
'ext': UNANALYZED_STRING,
# the target path if this FILE is a symlink
'link': UNANALYZED_STRING,
# Folder listings query by folder and then display filename, size,
# and mod date.
'folder': UNANALYZED_STRING, # path/to/a/folder
# filename.cpp or leaf_folder (for sorting and display)
'name': UNANALYZED_STRING,
'size': UNINDEXED_INT, # bytes. not present for folders.
'modified': { # not present for folders
'type': 'date',
'index': 'no'
},
'is_folder': {
'type': 'boolean'
},
'raw_data': { # present only if the file is an image
'type': 'binary',
'index': 'no'
},
'is_binary': { # assumed False if not present
'type': 'boolean',
'index': 'no'
},
# Sidebar nav links:
'links': {
'type': 'object',
'properties': {
'order': UNINDEXED_INT,
'heading': UNINDEXED_STRING,
'items': {
'type': 'object',
'properties': {
'icon': UNINDEXED_STRING,
'title': UNINDEXED_STRING,
'href': UNINDEXED_STRING
}
}
}
}
}
},
# The line doctype is the main workhorse of DXR searches. The search
# results present lines, so that's what we index.
LINE: {
'_all': {
'enabled': False
},
'properties': {
'path': PATH_SEGMENT_MAPPING,
'file_name': PATH_SEGMENT_MAPPING,
'ext': UNANALYZED_STRING,
# TODO: After the query language refresh, use match_phrase_prefix
# queries on non-globbed paths, analyzing them with the path
# analyzer, for max perf. Perfect! Otherwise, fall back to trigram-
# accelerated substring or wildcard matching.
'number': {
'type': 'integer'
},
# We index content 2 ways to keep RAM use down. Naively, we should
# be able to pull the content.trigrams_lower source out using our
# JS regex script, but in actuality, that uses much more RAM than
# pulling just plain content, to the point of crashing.
'content': {
'type': 'string',
'index': 'not_analyzed', # Support fast fetching from JS.
# ES supports terms of only length 32766 (by UTF-8 encoded
# length). The limit here (in Unicode points, in an
# unfortunate violation of consistency) keeps us under that,
# even if every point encodes to a 4-byte sequence. In
# real-world terms, this gets past all the Chinese in zh.txt in
# mozilla-central.
'ignore_above': 32766 / 4,
# These get populated even if the ignore_above kicks in:
'fields': {
'trigrams_lower': {
'type': 'string',
'analyzer': 'trigramalyzer_lower'
},
'trigrams': {
'type': 'string',
'analyzer': 'trigramalyzer'
}
}
},
'refs': {
'type': 'object',
'start': UNINDEXED_INT,
'end': UNINDEXED_INT,
'payload': {
'type': 'object',
'properties': {
'plugin': UNINDEXED_STRING,
'id': UNINDEXED_STRING, # Ref ID
'menu_data': UNINDEXED_STRING, # opaque to ES
'hover': UNINDEXED_STRING,
# Hash of qualname of the symbol we're hanging the
# menu off of, if it is a symbol and we can come up
# with a qualname. This powers the highlighting of
# other occurrences of the symbol when you pull up the
# context menu.
'qualname_hash': UNINDEXED_LONG
}
}
},
'regions': {
'type': 'object',
'start': UNINDEXED_INT,
'end': UNINDEXED_INT,
'payload': UNINDEXED_STRING,
},
'annotations': {
'type': 'object',
'properties': {
'title': UNINDEXED_STRING,
'class': UNINDEXED_STRING,
'style': UNINDEXED_STRING
}
}
}
}
}
# Custom ES analysis chain: fixed-length nGram ("trigram") tokenization that
# accelerates substring, wildcard, and regex queries on content and paths.
analyzers = {
    'analyzer': {
        # A lowercase trigram analyzer. This is probably good
        # enough for accelerating regexes; we probably don't
        # need to keep a separate case-sensitive index.
        'trigramalyzer_lower': {
            'type': 'custom',
            'filter': ['lowercase'],
            'tokenizer': 'trigram_tokenizer'
        },
        # And one for case-sensitive things:
        'trigramalyzer': {
            'type': 'custom',
            'tokenizer': 'trigram_tokenizer'
        },
        'lowercase': { # Not used here but defined for plugins' use
            'type': 'custom',
            'filter': ['lowercase'],
            'tokenizer': 'keyword'
        }
    },
    'tokenizer': {
        'trigram_tokenizer': {
            'type': 'nGram',
            # min == max == NGRAM_LENGTH: emit only exact-length grams.
            'min_gram': NGRAM_LENGTH,
            'max_gram': NGRAM_LENGTH
            # Keeps all kinds of chars by default.
        }
    }
}
def _find_iter(haystack, needle):
"""Return an iterable of indices at which string ``needle`` is found in
``haystack``.
:arg haystack: The unicode string to search within
:arg needle: The unicode string to search for
Return only the first of overlapping occurrences.
"""
if needle:
needle_len = len(needle)
offset = 0
while True:
offset = haystack.find(needle, offset)
if offset == -1:
break
yield offset
offset += needle_len
class TextFilter(Filter):
    """Filter matching a run of plain text in a file"""

    name = 'text'

    @negatable
    def filter(self):
        needle = self._term['arg']
        if len(needle) < NGRAM_LENGTH:
            # Too short for the trigram index to accelerate; match nothing.
            return None
        field = ('content.trigrams' if self._term['case_sensitive']
                 else 'content.trigrams_lower')
        return {'query': {'match_phrase': {field: needle}}}

    def highlight_content(self, result):
        """Return (start, end) extents of each occurrence within the line."""
        needle = self._term['arg']
        if self._term['case_sensitive']:
            fold = lambda s: s
        else:
            fold = lambda s: s.lower()
        # We assume result['content'] is a singleton. How could it be
        # otherwise?
        line = fold(result['content'][0])
        width = len(needle)
        return ((start, start + width)
                for start in _find_iter(line, fold(needle)))
class _PathSegmentFilterBase(Filter):
    """A base class for a filter that matches a glob against a path segment."""

    domain = FILE

    def _regex_filter(self, path_seg_property_name, no_trigrams_error_text):
        """Return an ES regex filter that matches this filter's glob against
        the path segment stored at ``path_seg_property_name``.

        Raise BadTerm with ``no_trigrams_error_text`` if the glob is too
        wildcarded to be trigram-accelerated.
        """
        glob_regex = glob_to_regex(self._term['arg'])
        case_sensitive = self._term['case_sensitive']
        try:
            parsed = regex_grammar.parse(glob_regex)
            return es_regex_filter(parsed,
                                   path_seg_property_name,
                                   is_case_sensitive=case_sensitive)
        except NoTrigrams:
            raise BadTerm(no_trigrams_error_text)
class PathFilter(_PathSegmentFilterBase):
    """Substring filter for paths

    Pre-ES parity dictates that this simply searches for paths that have the
    argument as a substring. We may allow anchoring and such later.
    """

    name = 'path'
    description = Markup('File or directory sub-path to search within. <code>*'
                         '</code>, <code>?</code>, and <code>[...]</code> act '
                         'as shell wildcards.')

    @negatable
    def filter(self):
        # Delegate to the base class, matching against the full path field.
        error_text = ('Path globs need at least 3 literal '
                      'characters in a row for speed.')
        return self._regex_filter('path', error_text)
class FilenameFilter(_PathSegmentFilterBase):
    """Substring filter for file names"""

    name = 'file'
    description = Markup('File to search within. <code>*</code>, '
                         '<code>?</code>, and <code>[...]</code> act as shell '
                         'wildcards.')

    @negatable
    def filter(self):
        # Delegate to the base class, matching against just the basename.
        error_text = ('File globs need at least 3 literal '
                      'characters in a row for speed.')
        return self._regex_filter('file_name', error_text)
class ExtFilter(Filter):
    """Case-sensitive filter for exact matching on file extensions"""

    name = 'ext'
    domain = FILE
    description = Markup('Filename extension: <code>ext:cpp</code>. Always '
                         'case-sensitive.')
    # The intersection of two different Ext filters would always be nothing.
    union_only = True

    @negatable
    def filter(self):
        ext = self._term['arg']
        if ext.startswith('.'):
            # Extensions are indexed without the leading period.
            ext = ext[1:]
        return {'term': {'ext': ext}}
class RegexpFilter(Filter):
    """Regular expression filter for file content"""
    name = 'regexp'
    description = Markup(r'Regular expression. Examples: '
                         r'<code>regexp:(?i)\bs?printf</code> '
                         r'<code>regexp:"(three|3) mice"</code>')
    def __init__(self, term, enabled_plugins):
        """Compile the Python equivalent of the regex so we don't have to lean
        on the regex cache during highlighting.
        Python's regex cache is naive: after it hits 100, it just clears: no
        LRU.

        :raises BadTerm: if the term's regex doesn't parse under our grammar
        """
        super(RegexpFilter, self).__init__(term, enabled_plugins)
        # Parse once with our own grammar; the parse tree is reused both for
        # the ES filter (filter()) and for Python-side highlighting.
        try:
            self._parsed_regex = regex_grammar.parse(term['arg'])
        except ParseError:
            raise BadTerm('Invalid regex.')
        self._compiled_regex = (
            re.compile(PythonRegexVisitor().visit(self._parsed_regex),
                       flags=0 if self._term['case_sensitive'] else re.I))
    @negatable
    def filter(self):
        # Build a trigram-accelerated ES filter from the parsed regex, or
        # refuse regexes that can't be accelerated.
        try:
            return es_regex_filter(
                self._parsed_regex,
                'content',
                is_case_sensitive=self._term['case_sensitive'])
        except NoTrigrams:
            raise BadTerm('Regexes need at least 3 literal characters in a '
                          'row for speed.')
    def highlight_content(self, result):
        # Yield (start, end) spans of every regex match on the line.
        return (m.span() for m in
                self._compiled_regex.finditer(result['content'][0]))
class FilterAggregator(Filter):
    """Filter class that acts by constructing the union of some subset of the
    filters of all currently enabled plugins.
    """

    def __init__(self, term, enabled_plugins, condition=None):
        super(FilterAggregator, self).__init__(term, enabled_plugins)
        # Instantiate every enabled-plugin filter that satisfies ``condition``.
        filter_classes = some_filters(enabled_plugins, condition)
        self.filters = [cls(term, enabled_plugins) for cls in filter_classes]

    def filter(self):
        """OR together the non-empty clauses of the underlying filters."""
        clauses = (sub.filter() for sub in self.filters)
        return {'or': [clause for clause in clauses if clause]}

    def highlight_content(self, result):
        """Union the highlight extents of all of our underlying filters."""
        return chain.from_iterable(sub.highlight_content(result)
                                   for sub in self.filters)
class IdFilter(FilterAggregator):
    """Filter aggregator for id: queries, groups together the results of all
    filters that find declarations and definitions of names."""

    name = 'id'
    domain = LINE
    description = Markup('Definition of an identifier: '
                         '<code>id:SomeClass</code> <code>id:@foofunction</code> '
                         '(@ forces case-sensitive search, '
                         'otherwise case-sensitive if not all lowercase)')

    def __init__(self, term, enabled_plugins):
        # Aggregate only the filters flagged as identifier finders.
        def finds_identifiers(candidate):
            return candidate.is_identifier
        super(IdFilter, self).__init__(term, enabled_plugins,
                                       finds_identifiers)
class RefFilter(FilterAggregator):
    """Filter aggregator for ref: queries, grouping together the results of
    all filters that find references to names."""

    name = 'ref'
    domain = LINE
    description = Markup('Reference to an identifier: '
                         '<code>ref:someVar</code> <code>ref:someType</code>')

    def __init__(self, term, enabled_plugins):
        # Aggregate only the filters that locate references, not definitions.
        def finds_references(candidate):
            return candidate.is_reference
        super(RefFilter, self).__init__(term, enabled_plugins,
                                        finds_references)
class TreeToIndex(dxr.indexers.TreeToIndex):
    # Core indexer: runs for every tree regardless of other enabled plugins.
    def environment(self, vars):
        """Add the tree's source and build folders to the build-environment
        dict ``vars``, and return it."""
        vars['source_folder'] = self.tree.source_folder
        vars['build_folder'] = self.tree.object_folder
        return vars
    def file_to_index(self, path, contents):
        """Return a FileToIndex for ``path``, wired to the VCS handle that
        owns it so links() can build permalinks."""
        return FileToIndex(path, contents, self.plugin_name, self.tree,
                           self.vcs_cache.vcs_for_path(path))
class FileToIndex(dxr.indexers.FileToIndex):
    def __init__(self, path, contents, plugin_name, tree, vcs):
        """Construct.

        :arg vcs: VCS handle for the repo containing this file; falsy when
            the file is untracked (see links())
        """
        super(FileToIndex, self).__init__(path, contents, plugin_name, tree)
        self.vcs = vcs
    def needles(self):
        """Fill out path (and path.trigrams).

        Yields (needle name, value) pairs for the FILE doc: link target,
        path, file name, extension, and raw data or a binary flag.
        """
        if self.is_link():
            # realpath will keep following symlinks until it gets to the 'real' thing.
            yield 'link', relpath(realpath(self.absolute_path()), self.tree.source_folder)
        yield 'path', self.path
        yield 'file_name', basename(self.path)
        extension = splitext(self.path)[1]
        if extension:
            yield 'ext', extension[1:] # skip the period
        # We store both the contents of textual images twice so that they can
        # both show up in searches and be previewed in the browser.
        if is_binary_image(self.path) or is_textual_image(self.path):
            # b64encode needs bytes, so textual images are encoded first.
            bytestring = (self.contents.encode('utf-8') if self.contains_text()
                          else self.contents)
            yield 'raw_data', b64encode(bytestring)
        # binary, but not an image
        elif not self.contains_text():
            yield 'is_binary', True
    def needles_by_line(self):
        """Fill out line number and content for every line."""
        for number, text in enumerate(self.contents.splitlines(True), 1):
            yield [('number', number),
                   ('content', text)]
    def links(self):
        # Sidebar nav links: VCS permalink (or "Untracked file") and, for
        # textual images, a raw "View" link.
        if self.vcs:
            vcs_relative_path = relpath(self.absolute_path(),
                                        self.vcs.get_root_dir())
            yield (5,
                   '%s (%s)' % (self.vcs.get_vcs_name(),
                                self.vcs.display_rev(vcs_relative_path)),
                   [('permalink', 'Permalink', url_for('.rev',
                                                       tree=self.tree.name,
                                                       revision=self.vcs.revision,
                                                       path=self.path))])
        else:
            yield 5, 'Untracked file', []
        if is_textual_image(self.path):
            yield (4,
                   'Image',
                   [('svgview', 'View', url_for('.raw',
                                                tree=self.tree.name,
                                                path=self.path))])
    def is_interesting(self):
        """Core plugin puts all files in the search index."""
        return True
# Match file name and line number: filename:n. Strip leading slashes because
# we don't have any in the index.
FILE_LINE_RE = re.compile("^/?(.+):([1-9][0-9]*)$")
def _file_and_line(term):
"""Return the pathname or filename and line number from a term with text
in the format filename:line_num. Return None if the term isn't in that
format.
"""
match = FILE_LINE_RE.match(term['arg'])
if match:
return match.group(1), int(match.group(2))
def _path_trigram_filter(path, is_case_sensitive):
    """Return an ES filter clause that returns docs whose paths match the
    given path all the way to their ends.

    If a given path starts with a /, the user is explicitly requesting a match
    starting at the root level.
    """
    anchored_at_root = path.startswith('/')
    if anchored_at_root:
        path = path[1:]  # Leading slashes aren't stored in the index.
    # Anchor either at the very beginning or at any path-segment boundary,
    # and always at the end of the path.
    template = '^{0}$' if anchored_at_root else '(/|^){0}$'
    return es_regex_filter(
        regex_grammar.parse(template.format(re.escape(path))),
        'path',
        is_case_sensitive)
@direct_search(priority=100)
def direct_path_and_line(term):
    """If the user types path:line_num, jump right to that line.

    "path" can be any contiguous sequence of complete path segments extending
    to the end of a path; to match fee/fi/fo/fum.cpp, any of the following
    would work:

    * /fee/fi/fo/fum.cpp
    * fo/fum.cpp
    * fum.cpp
    """
    parsed = _file_and_line(term)
    if parsed is None:
        return None  # no line number
    path, line = parsed
    try:
        path_clause = _path_trigram_filter(path, term['case_sensitive'])
    except NoTrigrams:
        return None
    return {
        'and': [
            path_clause,
            {'term': {'number': line}}
        ]
    }
@direct_search(priority=150, domain=FILE)
def direct_path(term):
    """If there is a (file or) contiguous trailing sequence of path segments
    matching the query, jump straight to that file.

    See :func:`direct_path_and_line` for examples.

    Return None when the path has too few literal characters for the
    trigram index to accelerate the match.
    """
    try:
        return _path_trigram_filter(term['arg'], term['case_sensitive'])
    except NoTrigrams:
        return None
| pombredanne/dxr | dxr/plugins/core.py | Python | mit | 20,394 | [
"VisIt"
] | 162e7df37dc88d460744e8f44b1760bb8ae6e40afde6a880c5f70ccf6a739d63 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011 L Fiaschi, T Kroeger, C Sommer, C Straehle, U Koethe, FA Hamprecht. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE ABOVE COPYRIGHT HOLDERS ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ABOVE COPYRIGHT HOLDERS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of their employers.
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
import __builtin__
from setuptools import setup
import os
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Copied from install-ilastik-deps.py ==> move to common place
__builtin__.installDir = os.environ["HOME"] + "/ilastik_deps_build"
__builtin__.pythonVersion="2.7"
__builtin__.gcc="/usr/bin/gcc"
__builtin__.gpp="/usr/bin/g++"
__builtin__.ls="/bin/ls"
__builtin__.cd="cd"
__builtin__.make="/usr/bin/make"
__builtin__.pwd="/bin/pwd"
__builtin__.cmake="/usr/local/bin/cmake"
__builtin__.hg="/usr/local/bin/hg"
__builtin__.git="/usr/local/git/bin/git"
__builtin__.pythonVersionPath = installDir+"/Frameworks/Python.framework/Versions/"+pythonVersion
__builtin__.pythonBinaryPath = pythonVersionPath+"/bin"
__builtin__.pythonSharePath = pythonVersionPath+"/share"
__builtin__.pythonLibrary = pythonVersionPath+"/libpython"+pythonVersion+".dylib"
__builtin__.pythonExecutable = pythonBinaryPath + "/python" + pythonVersion
__builtin__.pythonSitePackages = pythonLibrary + "/python" + pythonVersion + "/site-packages"
__builtin__.pythonIncludePath = pythonVersionPath + "/include/python" + pythonVersion
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
APP = ['ilastik/ilastikMain.py']

# Extra files py2app copies into the app bundle: Qt resources, the vigra
# image library and its Python extension, the VTK 5.9 dylibs (and their
# Python wrapper modules), and boost_python.
# NOTE: libvtkPythonCore.5.9.dylib appears twice in the original list; the
# duplicate is harmless (py2app copies it once) and is kept for fidelity.
DATA_FILES = [ #installDir+'/plugins/imageformats/libqtiff.dylib',
              #installDir+'/plugins/imageformats/libqjpeg.dylib',
              installDir+"/lib/qt_menu.nib",
              installDir+"/lib/libvigraimpex.3.dylib",
              installDir+"/library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/vigra/vigranumpycore.so",
              installDir+"/lib/libvtkCommonPythonD.5.9.dylib",
              installDir+"/lib/libvtkPythonCore.5.9.dylib",
              installDir+"/lib/libvtkCommon.5.9.dylib",
              installDir+"/lib/libvtkPythonCore.5.9.dylib",
              installDir+"/lib/libvtksys.5.9.dylib",
              installDir+"/lib/libvtkFilteringPythonD.5.9.dylib",
              installDir+"/lib/libvtkFiltering.5.9.dylib",
              installDir+"/lib/libvtkIOPythonD.5.9.dylib",
              installDir+"/lib/libvtkIO.5.9.dylib",
              installDir+"/lib/libvtkDICOMParser.5.9.dylib",
              installDir+"/lib/libvtkNetCDF.5.9.dylib",
              # Fixed: was the garbled "libvtkNetCDF_cxx.dyli/libb".
              installDir+"/lib/libvtkNetCDF_cxx.dylib",
              installDir+"/lib/libvtkmetaio.5.9.dylib",
              installDir+"/lib/libvtksqlite.5.9.dylib",
              installDir+"/lib/libvtkpng.5.9.dylib",
              installDir+"/lib/libvtkzlib.5.9.dylib",
              installDir+"/lib/libvtkjpeg.5.9.dylib",
              installDir+"/lib/libvtktiff.5.9.dylib",
              installDir+"/lib/libvtkexpat.5.9.dylib",
              installDir+"/lib/libvtkImagingPythonD.5.9.dylib",
              installDir+"/lib/libvtkImaging.5.9.dylib",
              installDir+"/lib/libvtkGraphicsPythonD.5.9.dylib",
              installDir+"/lib/libvtkGraphics.5.9.dylib",
              installDir+"/lib/libvtkverdict.5.9.dylib",
              installDir+"/lib/libvtkGenericFilteringPythonD.5.9.dylib",
              installDir+"/lib/libvtkGenericFiltering.5.9.dylib",
              installDir+"/lib/libvtkRenderingPythonD.5.9.dylib",
              installDir+"/lib/libvtkRendering.5.9.dylib",
              installDir+"/lib/libvtkftgl.5.9.dylib",
              installDir+"/lib/libvtkfreetype.5.9.dylib",
              installDir+"/lib/libvtkVolumeRenderingPythonD.5.9.dylib",
              installDir+"/lib/libvtkVolumeRendering.5.9.dylib",
              installDir+"/lib/libvtkHybridPythonD.5.9.dylib",
              installDir+"/lib/libvtkHybrid.5.9.dylib",
              installDir+"/lib/libvtkexoIIc.5.9.dylib",
              installDir+"/lib/libvtkWidgetsPythonD.5.9.dylib",
              installDir+"/lib/libvtkWidgets.5.9.dylib",
              installDir+"/lib/libvtkChartsPythonD.5.9.dylib",
              installDir+"/lib/libvtkCharts.5.9.dylib",
              installDir+"/lib/libvtkViewsPythonD.5.9.dylib",
              installDir+"/lib/libvtkViews.5.9.dylib",
              installDir+"/lib/libvtkInfovisPythonD.5.9.dylib",
              installDir+"/lib/libvtkInfovis.5.9.dylib",
              installDir+"/lib/libvtklibxml2.5.9.dylib",
              installDir+"/lib/libvtkalglib.5.9.dylib",
              installDir+"/lib/libvtkGeovisPythonD.5.9.dylib",
              installDir+"/lib/libvtkGeovis.5.9.dylib",
              installDir+"/lib/libvtkproj4.5.9.dylib",
              installDir+"/lib/libvtkQtPythonD.dylib",
              installDir+"/lib/libQVTKWidgetPlugin.dylib",
              installDir+"/lib/libQVTK.dylib",
              installDir+"/lib/libQVTK.5.9.0.dylib",
              installDir+"/lib/libQVTK.5.9.dylib",
              installDir+"/lib/vtkQtPython.so",
              installDir+"/lib/QVTKPython.so",
              installDir+"/lib/libboost_python.dylib",
              pythonSitePackages+"/VTK-5.9.0-py2.6.egg/vtk/vtkCommonPython.so",
              pythonSitePackages+"/VTK-5.9.0-py2.6.egg/vtk/vtkCommonPythonSIP.so",
              ]
OPTIONS = {'argv_emulation': False,
'packages':['PyQt4'],
'includes':[\
'distutils', 'sip', 'ctypes','ctypes.util','h5py._stub','h5py._conv','h5py.utils',
# http://permalink.gmane.org/gmane.comp.python.enthought.devel/26705
# The backends are dynamically imported and thus we need to
# tell py2app about them.
# Essential entries for bundling PyQt
'PyQt4.pyqtconfig', 'PyQt4.uic','PyQt4.QtCore','PyQt4.QtGui',
'site', 'os','vtk',
'vtk.vtkCommonPythonSIP',
#'vtk.vtkFilteringPythonSIP',
'vtk.vtkRenderingPythonSIP',
'vtk.vtkFilteringPythonSIP',
'numpy.core.multiarray',
'vigra', 'h5py._proxy', 'csv', #'vigra.svs',
'enthought',
'enthought.qt',
'enthought.pyface.*',
'enthought.pyface.ui.qt4.*',
'enthought.pyface.ui.qt4.action.*',
'enthought.pyface.ui.qt4.timer.*',
'enthought.pyface.ui.qt4.wizard.*',
'enthought.pyface.ui.qt4.workbench.*',
'enthought.pyface.action.*',
'enthought.pyface.toolkit',
'enthought.traits',
'enthought.traits.api',
'enthought.traits.ui.*',
'enthought.traits.ui.qt4.*',
'enthought.traits.ui.qt4.extra.*',
'enthought.pyface.ui.null',
'enthought.pyface.ui.null.action.*',
'qimage2ndarray',
#New Graph stuff
'greenlet',
'psutil',
],
'frameworks': [],
}
class ilastik_recipe(object):
    """py2app recipe: bundle the whole ilastik package when it is imported."""

    def check(self, dist, mf):
        """Return recipe options if the module graph ``mf`` contains the
        ilastik module; otherwise return None."""
        if mf.findNode('ilastik') is None:
            return None
        # Ship the full package and run the pre-launch environment script.
        return {'packages': ['ilastik'],
                'prescripts': ['osx-bundle-pre-launch.py']}
import py2app.recipes
py2app.recipes.ilastik = ilastik_recipe()
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
iconfile='appIcon.icns',
)
| ilastik/ilastik-0.5 | setup_mac.py | Python | bsd-2-clause | 9,322 | [
"VTK"
] | 03452128d4b3763374e4d0b2728fd26c4bea62df1c690b053e2101b0451ee3e1 |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
from wx.lib.ClickableHtmlWindow import PyClickableHtmlWindow
from robotide.version import VERSION
from robotide.pluginapi import ActionInfo
class ReleaseNotes(object):
    """Shows release notes of the current version.
    The release notes tab will automatically be shown once per release.
    The user can also view them on demand by selecting "Release Notes"
    from the help menu.
    """
    def __init__(self, application):
        self.application = application
        settings = application.settings
        # Last version whose notes were auto-shown; '' on first run.
        self.version_shown = settings.get('version_shown', '')
        # Lazily created notebook page; see _create_view().
        self._view = None
        self.enable()
    def enable(self):
        """Register the Help -> Release Notes menu action and auto-show the
        notes if this version hasn't been seen before."""
        self.application.frame.actions.register_action(ActionInfo('Help', 'Release Notes', self.show,
                                                                  doc='Show the release notes'))
        self.show_if_updated()
    def show_if_updated(self):
        # Show the tab once per release, then remember this version.
        if self.version_shown != VERSION:
            self.show()
            self.application.settings['version_shown'] = VERSION
    def show(self, event=None):
        """Open (creating if needed) the release-notes notebook tab."""
        if not self._view:
            self._view = self._create_view()
            self.application.frame.notebook.AddPage(self._view, "Release Notes", select=False)
        self.application.frame.notebook.show_tab(self._view)
    def bring_to_front(self):
        # Focus the tab only if it has already been created.
        if self._view:
            self.application.frame.notebook.show_tab(self._view)
    def _create_view(self):
        """Build the notebook page: an HTML window rendering the notes."""
        panel = wx.Panel(self.application.frame.notebook)
        html_win = PyClickableHtmlWindow(panel, -1)
        html_win.SetStandardFonts()
        html_win.SetPage(WELCOME_TEXT + RELEASE_NOTES)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(html_win, 1, wx.EXPAND|wx.ALL, border=8)
        panel.SetSizer(sizer)
        return panel
WELCOME_TEXT = """
<h2>Welcome to use RIDE version %s</h2>
<p>Thank you for using the Robot Framework IDE (RIDE).</p>
<p>Visit RIDE on the web:</p>
<ul>
<li><a href="https://github.com/robotframework/RIDE">
RIDE project page on github</a></li>
<li><a href="https://github.com/robotframework/RIDE/wiki/Installation-Instructions">
Installation instructions</a></li>
<li><a href="https://github.com/robotframework/RIDE/wiki/Release-notes">
Release notes</a></li>
</ul>
""" % VERSION
# *** DO NOT EDIT THE CODE BELOW MANUALLY ***
# Release notes are updated automatically by package.py script whenever
# a numbered distribution is created.
RELEASE_NOTES = """
<h2>Release notes for 1.5a2</h2>
<table border="1">
<tr>
<td><p><b>ID</b></p></td>
<td><p><b>Type</b></p></td>
<td><p><b>Priority</b></p></td>
<td><p><b>Summary</b></p></td>
</tr>
<tr>
<td><a href="http://github.com/robotframework/RIDE/issues/http://github.com/robotframework/RIDE/issues/">Issue http://github.com/robotframework/RIDE/issues/</a></td>
<td>bug</td>
<td>critical</td>
<td>Cannot import remote library in 1.4.1</td>
</tr>
<tr>
<td><a href="http://github.com/robotframework/RIDE/issues/http://github.com/robotframework/RIDE/issues/">Issue http://github.com/robotframework/RIDE/issues/</a></td>
<td>enhancement</td>
<td>critical</td>
<td>Support RF 2.9</td>
</tr>
<tr>
<td><a href="http://github.com/robotframework/RIDE/issues/http://github.com/robotframework/RIDE/issues/">Issue http://github.com/robotframework/RIDE/issues/</a></td>
<td>bug</td>
<td>high</td>
<td>'--monitorcolors' and '--monitorwidth' is deprecated WARN message</td>
</tr>
<tr>
<td><a href="http://github.com/robotframework/RIDE/issues/http://github.com/robotframework/RIDE/issues/">Issue http://github.com/robotframework/RIDE/issues/</a></td>
<td>bug</td>
<td>medium</td>
<td>Highlighting selected cell (and matches) does not work.</td>
</tr>
<tr>
<td><a href="http://github.com/robotframework/RIDE/issues/http://github.com/robotframework/RIDE/issues/">Issue http://github.com/robotframework/RIDE/issues/</a></td>
<td>bug</td>
<td>medium</td>
<td>RIDE Log shows "The C++ part of the VariablesListEditor object has been deleted, attribute access no longer allowed"</td>
</tr>
<tr>
<td><a href="http://github.com/robotframework/RIDE/issues/http://github.com/robotframework/RIDE/issues/">Issue http://github.com/robotframework/RIDE/issues/</a></td>
<td>bug</td>
<td>medium</td>
<td>"Find Where Used" in editor not working</td>
</tr>
</table>
<p>Altogether 6 issues.</p>
"""
| fingeronthebutton/RIDE | src/robotide/application/releasenotes.py | Python | apache-2.0 | 5,022 | [
"VisIt"
] | a711f5162eebd2b6f45ed343da092518298b04ba41dc852475f5d970bc6b7831 |
# -*- coding: utf-8 -*-
"""
flaskbb.utils.helpers
~~~~~~~~~~~~~~~~~~~~
A few helpers that are used by flaskbb
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
import re
import time
import itertools
import operator
import struct
from io import BytesIO
from datetime import datetime, timedelta
import requests
from flask import session, url_for
from babel.dates import format_timedelta
from flask_themes2 import render_theme_template
from flask_login import current_user
import unidecode
from flaskbb._compat import range_method, text_type
from flaskbb.extensions import redis_store
from flaskbb.utils.settings import flaskbb_config
from flaskbb.utils.markup import markdown
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim=u'-'):
    """Generate a slightly worse ASCII-only slug.

    Taken from the Flask Snippets page.

    :param text: The text which should be slugified
    :param delim: Default "-". The delimiter used to join the words
    """
    # transliterate to ASCII first, then split on punctuation/whitespace
    ascii_text = unidecode.unidecode(text)
    words = (word for word in _punct_re.split(ascii_text.lower()) if word)
    return text_type(delim.join(words))
def render_template(template, **context):  # pragma: no cover
    """Render a template through ``render_theme_template`` so that every
    view automatically picks up the active theme, without each view having
    to be edited.
    """
    # a signed-in user's own theme wins; otherwise fall back to the
    # session choice or the configured default
    if current_user.is_authenticated() and current_user.theme:
        active_theme = current_user.theme
    else:
        active_theme = session.get('theme', flaskbb_config['DEFAULT_THEME'])
    return render_theme_template(active_theme, template, **context)
def get_categories_and_forums(query_result, user):
    """Group a category/forum query result by category.

    Produces a list of ``(category, forums)`` pairs where ``forums`` is a
    list of ``(forum, forumsread)`` tuples, e.g.::

        [(<Category 1>,
          [(<Forum 1>, None),
           (<Forum 2>, <flaskbb.forum.models.ForumsRead at 0x38fdb50>)]),
         (<Category 2>,
          [(<Forum 3>, None),
           (<Forum 4>, None)])]

    For signed-out users the ``forumsread`` entry is always ``None``
    because the ForumsRead relation is not joined for them.

    :param query_result: A tuple (KeyedTuple) with all categories and forums
    :param user: The user object is needed because a signed out user does not
                 have the ForumsRead relation joined.
    """
    grouped = itertools.groupby(query_result, operator.itemgetter(0))
    if user.is_authenticated():
        return [(category, [(row[1], row[2]) for row in rows])
                for category, rows in grouped]
    return [(category, [(row[1], None) for row in rows])
            for category, rows in grouped]
def get_forums(query_result, user):
    """Return one ``(category, forums)`` tuple for a single-category query.

    This is the counterpart of :func:`get_categories_and_forums` for the
    case where only the forums of one category are needed, e.g.::

        (<Category 2>,
         [(<Forum 3>, None),
          (<Forum 4>, None)])

    :param query_result: A tuple (KeyedTuple) with all categories and forums
    :param user: The user object is needed because a signed out user does not
                 have the ForumsRead relation joined.
    """
    authenticated = user.is_authenticated()
    for category, rows in itertools.groupby(query_result,
                                            operator.itemgetter(0)):
        if authenticated:
            forums = category, [(row[1], row[2]) for row in rows]
        else:
            forums = category, [(row[1], None) for row in rows]
    return forums
def forum_is_unread(forum, forumsread, user):
    """Check whether ``forum`` contains posts the user has not read yet.

    Returns ``True`` when the forum is unread for this user, else ``False``.

    :param forum: The forum that should be checked if it is unread
    :param forumsread: The forumsread object for the forum
    :param user: The user who should be checked if he has read the forum
    """
    # If the user is not signed in, every forum is marked as read
    if not user.is_authenticated():
        return False
    # Posts older than this cutoff always count as read.
    read_cutoff = datetime.utcnow() - timedelta(
        days=flaskbb_config["TRACKER_LENGTH"])
    # disable tracker if TRACKER_LENGTH is set to 0
    if flaskbb_config["TRACKER_LENGTH"] == 0:
        return False
    # If there are no topics in the forum, mark it as read
    if forum and forum.topic_count == 0:
        return False
    # If the user hasn't visited a topic in the forum - therefore,
    # forumsread is None and we need to check if it is still unread
    if forum and not forumsread:
        return forum.last_post_created > read_cutoff
    try:
        # check if the forum has been cleared and if there is a new post
        # since it have been cleared
        # (a TypeError means forumsread.cleared is None, i.e. the user never
        # cleared the forum; fall through to the last-read check below)
        if forum.last_post_created > forumsread.cleared:
            if forum.last_post_created < forumsread.last_read:
                return False
    except TypeError:
        pass
    # else just check if the user has read the last post
    return forum.last_post_created > forumsread.last_read
def topic_is_unread(topic, topicsread, user, forumsread=None):
    """Checks if a topic is unread.

    Returns ``True`` when the topic has posts the user has not seen yet.

    :param topic: The topic that should be checked if it is unread
    :param topicsread: The topicsread object for the topic
    :param user: The user who should be checked if he has read the last post
                 in the topic
    :param forumsread: The forumsread object in which the topic is. If you
                       also want to check if the user has marked the forum as
                       read, than you will also need to pass an forumsread
                       object.
    """
    # Guests have no read tracking; everything counts as read.
    if not user.is_authenticated():
        return False
    # Posts older than this cutoff always count as read.
    read_cutoff = datetime.utcnow() - timedelta(
        days=flaskbb_config["TRACKER_LENGTH"])
    # disable tracker if read_cutoff is set to 0
    if flaskbb_config["TRACKER_LENGTH"] == 0:
        return False
    # check read_cutoff
    if topic.last_post.date_created < read_cutoff:
        return False
    # topicsread is none if the user has marked the forum as read
    # or if he hasn't visited yet
    if topicsread is None:
        # user has cleared the forum sometime ago - check if there is a new post
        if forumsread and forumsread.cleared is not None:
            return forumsread.cleared < topic.last_post.date_created
        # user hasn't read the topic yet, or there is a new post since the user
        # has marked the forum as read
        return True
    # check if there is a new post since the user's last topic visit
    return topicsread.last_read < topic.last_post.date_created
def mark_online(user_id, guest=False):  # pragma: no cover
    """Mark a user as online in the redis activity tracker.

    :param user_id: The id from the user who should be marked as online
    :param guest: If set to True, it will add the user to the guest activity
                  instead of the user activity.

    Ref: http://flask.pocoo.org/snippets/71/
    """
    now = int(time.time())
    expires = now + (flaskbb_config['ONLINE_LAST_MINUTES'] * 60) + 10
    # guests and members are tracked in separate key namespaces
    if guest:
        all_users_key = 'online-guests/%d' % (now // 60)
        user_key = 'guest-activity/%s' % user_id
    else:
        all_users_key = 'online-users/%d' % (now // 60)
        user_key = 'user-activity/%s' % user_id
    pipe = redis_store.pipeline()
    pipe.sadd(all_users_key, user_id)
    pipe.set(user_key, now)
    # keys expire shortly after the tracking window so redis cleans up
    pipe.expireat(all_users_key, expires)
    pipe.expireat(user_key, expires)
    pipe.execute()
def get_online_users(guest=False):  # pragma: no cover
    """Return all users seen online within the configured time window.

    :param guest: If True, it will return the online guests
    """
    current = int(time.time()) // 60
    prefix = 'online-guests' if guest else 'online-users'
    # union the per-minute sets covering the whole tracking window
    minute_keys = ['%s/%d' % (prefix, current - x)
                   for x in range_method(flaskbb_config['ONLINE_LAST_MINUTES'])]
    return redis_store.sunion(minute_keys)
def crop_title(title, length=None, suffix="..."):
    """Crop a title to a maximum length, cutting at a word boundary.

    :param title: The title that should be cropped
    :param length: Maximum length; defaults to the configured TITLE_LENGTH
    :param suffix: The suffix which should be appended at the
                   end of the title.
    """
    if length is None:
        length = flaskbb_config['TITLE_LENGTH']
    if len(title) <= length:
        return title
    # drop the (probably partial) last word before adding the suffix
    cropped = title[:length].rsplit(' ', 1)[0]
    return cropped + suffix
def render_markup(text):
    """Render the given text with the application's markdown renderer.

    :param text: The text that should be rendered as markdown
    """
    return markdown.render(text)
def is_online(user):
    """A simple check to see if the user was online within a specified
    time range

    :param user: The user who needs to be checked
    """
    # ``time_diff()`` is "now minus ONLINE_LAST_MINUTES"; a user counts as
    # online when their last-seen timestamp falls inside that window
    return user.lastseen >= time_diff()
def time_diff():
    """Return the UTC timestamp that lies ``ONLINE_LAST_MINUTES`` minutes
    in the past, as configured.
    """
    window = timedelta(minutes=flaskbb_config['ONLINE_LAST_MINUTES'])
    return datetime.utcnow() - window
def format_date(value, format='%Y-%m-%d'):
    """Format a datetime object as a string.

    :param value: The datetime object that should be formatted
    :param format: How the result should look like. A full list of available
                   directives is here: http://goo.gl/gNxMHE
    """
    return value.strftime(format)
def time_since(time):  # pragma: no cover
    """Returns a string representing time since e.g.
    3 days ago, 5 hours ago.

    :param time: A datetime object
    """
    # delta is negative for past timestamps; babel renders the direction
    delta = time - datetime.utcnow()
    locale = "en"
    # prefer the signed-in user's language when one is set
    if current_user.is_authenticated() and current_user.language is not None:
        locale = current_user.language
    return format_timedelta(delta, add_direction=True, locale=locale)
def format_quote(username, content):
    """Build a markdown blockquote attributing ``content`` to ``username``.

    :param username: The username of a user.
    :param content: The content of the quote
    """
    profile_url = url_for('user.profile', username=username)
    # prefix every line of the quoted content with "> "
    quoted_content = "\n> ".join(content.strip().split('\n'))
    return "**[{username}]({profile_url}) wrote:**\n> {content}\n".format(
        username=username, profile_url=profile_url, content=quoted_content)
def get_image_info(url):
    """Returns the content-type, image size (kb), height and width of a image
    without fully downloading it. It will just download the first 1024 bytes.

    Returns a dict with keys ``content-type``, ``size``, ``width`` and
    ``height``; unknown values are reported as ``-1`` (size/width/height)
    or ``''`` (content-type).

    LICENSE: New BSD License (taken from the start page of the repository)
    https://code.google.com/p/bfg-pages/source/browse/trunk/pages/getimageinfo.py
    """
    r = requests.get(url, stream=True)
    image_size = r.headers.get("content-length")
    # BUG FIX: the Content-Length header is optional (e.g. chunked
    # responses); previously float(None) raised a TypeError here.
    if image_size is not None:
        image_size = float(image_size) / 1000  # in kilobyte
    else:
        image_size = -1
    data = r.raw.read(1024)
    size = len(data)
    height = -1
    width = -1
    content_type = ''
    if size:
        size = int(size)
        # handle GIFs
        if (size >= 10) and data[:6] in (b'GIF87a', b'GIF89a'):
            # Check to see if content_type is correct
            content_type = 'image/gif'
            w, h = struct.unpack(b'<HH', data[6:10])
            width = int(w)
            height = int(h)
        # See PNG 2. Edition spec (http://www.w3.org/TR/PNG/)
        # Bytes 0-7 are below, 4-byte chunk length, then 'IHDR'
        # and finally the 4-byte width, height
        elif ((size >= 24) and data.startswith(b'\211PNG\r\n\032\n') and
                (data[12:16] == b'IHDR')):
            content_type = 'image/png'
            w, h = struct.unpack(b">LL", data[16:24])
            width = int(w)
            height = int(h)
        # Maybe this is for an older PNG version.
        elif (size >= 16) and data.startswith(b'\211PNG\r\n\032\n'):
            # Check to see if we have the right content type
            content_type = 'image/png'
            w, h = struct.unpack(b">LL", data[8:16])
            width = int(w)
            height = int(h)
        # handle JPEGs: walk the segment markers until the SOF frame, which
        # carries the dimensions
        elif (size >= 2) and data.startswith(b'\377\330'):
            content_type = 'image/jpeg'
            jpeg = BytesIO(data)
            jpeg.read(2)
            b = jpeg.read(1)
            try:
                while (b and ord(b) != 0xDA):
                    while (ord(b) != 0xFF):
                        b = jpeg.read(1)
                    while (ord(b) == 0xFF):
                        b = jpeg.read(1)
                    if (ord(b) >= 0xC0 and ord(b) <= 0xC3):
                        jpeg.read(3)
                        h, w = struct.unpack(b">HH", jpeg.read(4))
                        break
                    else:
                        jpeg.read(int(struct.unpack(b">H", jpeg.read(2))[0])-2)
                    b = jpeg.read(1)
                width = int(w)
                height = int(h)
            except struct.error:
                pass
            except ValueError:
                pass
    return {"content-type": content_type, "size": image_size,
            "width": width, "height": height}
def check_image(url):
    """A little wrapper for the :func:`get_image_info` function.

    If the image doesn't match the ``flaskbb_config`` settings it will
    return a tuple whose first value is a custom error message and whose
    second value is ``False`` (check failed). On success the error message
    is ``None`` and the second value is ``True``.

    :param url: The image url to be checked.
    """
    info = get_image_info(url)
    # each rule is checked in order; the first violation wins
    if info["content-type"] not in flaskbb_config["AVATAR_TYPES"]:
        allowed_types = ", ".join(flaskbb_config["AVATAR_TYPES"])
        return ("Image type is not allowed. Allowed types are: {}".format(
            allowed_types), False)
    if info["width"] > flaskbb_config["AVATAR_WIDTH"]:
        return ("Image is too wide! {}px width is allowed.".format(
            flaskbb_config["AVATAR_WIDTH"]), False)
    if info["height"] > flaskbb_config["AVATAR_HEIGHT"]:
        return ("Image is too high! {}px height is allowed.".format(
            flaskbb_config["AVATAR_HEIGHT"]), False)
    if info["size"] > flaskbb_config["AVATAR_SIZE"]:
        return ("Image is too big! {}kb are allowed.".format(
            flaskbb_config["AVATAR_SIZE"]), False)
    return None, True
| SeanChen0617/flaskbb | flaskbb/utils/helpers.py | Python | bsd-3-clause | 14,644 | [
"VisIt"
] | 3aed9be4b153f4897da8544a170c144a0e56c6cf23017935158b23f3959c6b7a |
##
# title: Server.py
# by: Brian Kim
# description: the top-level script that
# executes RAMS and roams
#
| briansan/rams | RAMS/app/Server.py | Python | bsd-3-clause | 109 | [
"Brian"
] | 194e77b9cf80936e546c849655f07328ddcb831c0476ac8df7c2d2f3ee093e87 |
"""
Assesment of Generalized Estimating Equations using simulation.
Only Gaussian models are currently checked.
See the generated file "gee_simulation_check.txt" for results.
"""
from statsmodels.compat.python import range, lrange, zip
import scipy
import numpy as np
from itertools import product
from statsmodels.genmod.families import Gaussian
from statsmodels.genmod.generalized_estimating_equations import GEE
from statsmodels.genmod.dependence_structures import Autoregressive, Nested
np.set_printoptions(formatter={'all': lambda x: "%8.3f" % x},
suppress=True)
OUT = open("gee_simulation_check.txt", "w")
class GEE_simulator(object):
    """Base class for GEE simulation scenarios.

    Subclasses set the input parameters below, generate data via their own
    ``simulate`` method, and implement ``print_dparams``.
    """
    #
    # Parameters that must be defined
    #
    # Number of groups
    ngroups = None
    # Standard deviation of the pure errors
    error_sd = None
    # The regression coefficients
    params = None
    # The parameters defining the dependence structure
    dparams = None
    #
    # Output parameters
    #
    # Matrix of exogeneous data (rows are cases, columns are
    # variables)
    exog = None
    # Matrix of endogeneous data (len(endog) = exog.shape[0])
    endog = None
    # Matrix of time information (time.shape[0] = len(endog))
    time = None
    # Group labels (len(groups) = len(endog))
    group = None
    # Group sizes are random within this range
    group_size_range = [4, 11]
    # dparams_est is dparams with scale_inv appended
    def print_dparams(self, dparams_est):
        # must be provided by each concrete simulator
        raise NotImplementedError
class AR_simulator(GEE_simulator):
    """Simulates Gaussian data whose within-group errors are correlated
    with an autoregressive structure based on pairwise distances in time.
    """
    # The distance function for determining AR correlations.
    # NOTE(review): stored inside a list so the lambda is kept as plain data
    # instead of being bound as an instance method.
    distfun = [lambda x, y: np.sqrt(np.sum((x-y)**2)),]
    def print_dparams(self, dparams_est):
        # dparams_est is [ar_coefficient, scale_inv]; compare both against
        # the simulation truth
        OUT.write("AR coefficient estimate: %8.4f\n" %
                  dparams_est[0])
        OUT.write("AR coefficient truth: %8.4f\n" %
                  self.dparams[0])
        OUT.write("Error variance estimate: %8.4f\n" %
                  dparams_est[1])
        OUT.write("Error variance truth: %8.4f\n" %
                  self.error_sd**2)
        OUT.write("\n")
    def simulate(self):
        """Generate the endog/exog/time/group arrays for all groups."""
        endog, exog, group, time = [], [], [], []
        for i in range(self.ngroups):
            gsize = np.random.randint(self.group_size_range[0],
                                      self.group_size_range[1])
            group.append([i,] * gsize)
            time1 = np.random.normal(size=(gsize,2))
            time.append(time1)
            exog1 = np.random.normal(size=(gsize, 5))
            exog1[:,0] = 1
            exog.append(exog1)
            # Pairwise distances within the cluster
            distances = scipy.spatial.distance.cdist(time1, time1,
                                                     self.distfun[0])
            # Pairwise correlations within the cluster
            correlations = self.dparams[0]**distances
            correlations_sr = np.linalg.cholesky(correlations)
            # correlated errors via the Cholesky factor
            errors = np.dot(correlations_sr, np.random.normal(size=gsize))
            endog1 = np.dot(exog1, self.params) + errors * self.error_sd
            endog.append(endog1)
        self.exog = np.concatenate(exog, axis=0)
        self.endog = np.concatenate(endog)
        self.time = np.concatenate(time, axis=0)
        self.group = np.concatenate(group)
class Nested_simulator(GEE_simulator):
    """Simulates Gaussian data with a nested (multilevel) random-effects
    dependence structure.
    """
    # Vector containing list of nest sizes (used instead of
    # group_size_range).
    nest_sizes = None
    # Matrix of nest id's (an output parameter)
    id_matrix = None
    def print_dparams(self, dparams_est):
        # dparams_est holds one variance per nesting level followed by the
        # total variance (scale_inv); the residual error variance is the
        # difference between the last entry and the sum of the others.
        for j in range(len(self.nest_sizes)):
            OUT.write("Nest %d variance estimate: %8.4f\n" % \
                (j+1, dparams_est[j]))
            OUT.write("Nest %d variance truth: %8.4f\n" % \
                (j+1, self.dparams[j]))
        OUT.write("Error variance estimate: %8.4f\n" % \
            (dparams_est[-1] - sum(dparams_est[0:-1])))
        OUT.write("Error variance truth: %8.4f\n" %
                  self.error_sd**2)
        OUT.write("\n")
    def simulate(self):
        """Generate endog/exog/group/id_matrix for all groups."""
        group_effect_var = self.dparams[0]
        # variance components for the inner nesting levels; the trailing 0
        # pads the innermost level (no extra random effect there)
        vcomp = self.dparams[1:]
        vcomp.append(0)
        endog, exog, group, id_matrix = [], [], [], []
        for i in range(self.ngroups):
            iterators = [lrange(n) for n in self.nest_sizes]
            # The random effects
            variances = [np.sqrt(v)*np.random.normal(size=n)
                         for v,n in zip(vcomp, self.nest_sizes)]
            gpe = np.random.normal() * np.sqrt(group_effect_var)
            nest_all = []
            for j in self.nest_sizes:
                nest_all.append(set())
            for nest in product(*iterators):
                group.append(i)
                # The sum of all random effects that apply to this
                # unit
                ref = gpe + sum([v[j] for v,j in zip(variances, nest)])
                exog1 = np.random.normal(size=5)
                exog1[0] = 1
                exog.append(exog1)
                error = ref + self.error_sd * np.random.normal()
                endog1 = np.dot(exog1, self.params) + error
                endog.append(endog1)
                for j in range(len(nest)):
                    nest_all[j].add(tuple(nest[0:j+1]))
                nest1 = [len(x)-1 for x in nest_all]
                id_matrix.append(nest1[0:-1])
        self.exog = np.array(exog)
        self.endog = np.array(endog)
        self.group = np.array(group)
        self.id_matrix = np.array(id_matrix)
        self.time = np.zeros_like(self.endog)
def check_constraint(da, va, ga):
    """
    Check the score testing of the parameter constraints.
    """
    # NOTE(review): this is an unimplemented placeholder (returns None);
    # the actual constraint checking is performed inline in the script
    # below. Consider implementing or removing it.
def gen_gendat_ar0(ar):
    """Return a data-generator closure for an AR model with coefficient
    ``ar`` and params [0, -1, 1, 0, 0.5].
    """
    def gendat_ar0(msg = False):
        sim = AR_simulator()
        sim.ngroups = 200
        sim.params = np.r_[0, -1, 1, 0, 0.5]
        sim.error_sd = 2
        sim.dparams = [ar,]
        sim.simulate()
        return sim, Autoregressive()
    return gendat_ar0
def gen_gendat_ar1(ar):
    """Return a data-generator closure for an AR model with coefficient
    ``ar`` and params [0, -0.8, 1.2, 0, 0.5].
    """
    def gendat_ar1():
        sim = AR_simulator()
        sim.ngroups = 200
        sim.params = np.r_[0, -0.8, 1.2, 0, 0.5]
        sim.error_sd = 2
        sim.dparams = [ar,]
        sim.simulate()
        return sim, Autoregressive()
    return gendat_ar1
def gendat_nested0():
    """Generate nested data with variance components [2, 1] and unit
    error standard deviation.
    """
    sim = Nested_simulator()
    sim.error_sd = 1.
    sim.params = np.r_[0., 1, 1, -1, -1]
    sim.ngroups = 50
    sim.nest_sizes = [10, 5]
    sim.dparams = [2., 1.]
    sim.simulate()
    return sim, Nested(sim.id_matrix)
def gendat_nested1():
    """Generate nested data with variance components [1, 3] and error
    standard deviation 2.
    """
    sim = Nested_simulator()
    sim.error_sd = 2.
    sim.params = np.r_[0, 1, 1.3, -0.8, -1.2]
    sim.ngroups = 50
    sim.nest_sizes = [10, 5]
    sim.dparams = [1., 3.]
    sim.simulate()
    return sim, Nested(sim.id_matrix)
# ---------------------------------------------------------------------------
# Simulation driver: for each data-generating model, fit GEE `nrep` times,
# average the estimates, and write a comparison against the truth to OUT.
# ---------------------------------------------------------------------------
nrep = 100
gendats = [gen_gendat_ar0(ar) for ar in (0, 0.3, 0.6)]
gendats.extend([gen_gendat_ar1(ar) for ar in (0, 0.3, 0.6)])
gendats.extend([gendat_nested0, gendat_nested1])
# Constraint lhs * params = rhs used for the score tests below.
lhs = np.array([[0., 1, 1, 0, 0],])
rhs = np.r_[0.,]
# Loop over data generating models
for gendat in gendats:
    pvalues = []
    params = []
    std_errors = []
    dparams = []
    for j in range(nrep):
        # unconstrained fit on a fresh data set
        da,va = gendat()
        ga = Gaussian()
        md = GEE(da.endog, da.exog, da.group, da.time, ga, va)
        mdf = md.fit()
        scale_inv = 1 / md.estimate_scale()
        dparams.append(np.r_[va.dparams, scale_inv])
        params.append(np.asarray(mdf.params))
        std_errors.append(np.asarray(mdf.standard_errors))
        # constrained fit on another fresh data set, to collect the score
        # test p-value for the (true) constraint
        da,va = gendat()
        ga = Gaussian()
        md = GEE(da.endog, da.exog, da.group, da.time, ga, va,
                 constraint=(lhs, rhs))
        mdf = md.fit()
        score = md.score_test_results
        pvalue = score["p-value"]
        pvalues.append(pvalue)
    dparams_mean = np.array(sum(dparams) / len(dparams))
    OUT.write("Checking dependence parameters:\n")
    da.print_dparams(dparams_mean)
    params = np.array(params)
    eparams = params.mean(0)
    sdparams = params.std(0)
    std_errors = np.array(std_errors)
    std_errors = std_errors.mean(0)
    OUT.write("Checking parameter values:\n")
    OUT.write("Observed: ")
    OUT.write(np.array_str(eparams) + "\n")
    OUT.write("Expected: ")
    OUT.write(np.array_str(da.params) + "\n")
    OUT.write("Absolute difference: ")
    OUT.write(np.array_str(eparams - da.params) + "\n")
    OUT.write("Relative difference: ")
    OUT.write(np.array_str((eparams - da.params) / da.params) + "\n")
    OUT.write("\n")
    OUT.write("Checking standard errors\n")
    OUT.write("Observed: ")
    OUT.write(np.array_str(sdparams) + "\n")
    OUT.write("Expected: ")
    OUT.write(np.array_str(std_errors) + "\n")
    OUT.write("Absolute difference: ")
    OUT.write(np.array_str(sdparams - std_errors) + "\n")
    OUT.write("Relative difference: ")
    OUT.write(np.array_str((sdparams - std_errors) / std_errors) + "\n")
    OUT.write("\n")
    # under the null (the constraint holds), the sorted p-values should be
    # close to the uniform quantiles printed alongside
    pvalues.sort()
    OUT.write("Checking constrained estimation:\n")
    OUT.write("Left hand side:\n")
    OUT.write(np.array_str(lhs) + "\n")
    OUT.write("Right hand side:\n")
    OUT.write(np.array_str(rhs) + "\n")
    OUT.write("Observed p-values Expected Null p-values\n")
    for q in np.arange(0.1, 0.91, 0.1):
        OUT.write("%20.3f %20.3f\n" % (pvalues[int(q*len(pvalues))], q))
    OUT.write("=" * 80 + "\n\n")
OUT.close()
| rgommers/statsmodels | statsmodels/genmod/tests/gee_simulation_check.py | Python | bsd-3-clause | 9,456 | [
"Gaussian"
] | 8156f7b06dab38edfc1927ade9566bf458d25b098faf97c9493efd217125ad2b |
# $Id$
#
# Copyright (C) 2002-2008 greg Landrum and Rational Discovery LLC
#
""" Various bits and pieces for calculating Molecular descriptors
"""
import re
from rdkit.Chem import Descriptors as DescriptorsMod
from rdkit.ML.Descriptors import Descriptors
from rdkit.RDLogger import logger
import pickle
logger = logger()
class MolecularDescriptorCalculator(Descriptors.DescriptorCalculator):
  """ used for calculating descriptors for molecules

  Descriptor names are resolved as functions in the
  ``rdkit.Chem.Descriptors`` module.
  """

  def __init__(self, simpleList, *args, **kwargs):
    """ Constructor

      **Arguments**

        - simpleList: list of simple descriptors to be calculated
              (see below for format)

      **Note**

        - format of simpleList:
           a list of strings which are functions in the rdkit.Chem.Descriptors module

    """
    self.simpleList = tuple(simpleList)
    self.descriptorNames = tuple(self.simpleList)
    self.compoundList = None
    self._findVersions()

  def _findVersions(self):
    """ populates self.descriptorVersions with the version string of each
      descriptor function ('N/A' when the function is missing or carries
      no version attribute)
    """
    self.descriptorVersions = []
    for nm in self.simpleList:
      vers = 'N/A'
      if hasattr(DescriptorsMod, nm):
        fn = getattr(DescriptorsMod, nm)
        if hasattr(fn, 'version'):
          vers = fn.version
      self.descriptorVersions.append(vers)

  def SaveState(self, fileName):
    """ Writes this calculator off to a file so that it can be easily loaded later

     **Arguments**

       - fileName: the name of the file to be written

    """
    try:
      f = open(fileName, 'wb+')
    except Exception:
      logger.error('cannot open output file %s for writing' % (fileName))
      return
    # BUG FIX: use the file as a context manager so the handle is closed
    # even when pickling raises (previously the handle leaked on error).
    with f:
      pickle.dump(self, f)

  def CalcDescriptors(self, mol, *args, **kwargs):
    """ calculates all descriptors for a given molecule

      **Arguments**

        - mol: the molecule to be used

      **Returns**
        a tuple of all descriptor values; a descriptor that raises is
        reported as -666, an unknown descriptor name yields 777

    """
    res = [-666] * len(self.simpleList)
    for i, nm in enumerate(self.simpleList):
      # fall back to a constant-777 function for unknown names
      fn = getattr(DescriptorsMod, nm, lambda x: 777)
      try:
        res[i] = fn(mol)
      except Exception:
        import traceback
        traceback.print_exc()
    return tuple(res)

  def GetDescriptorNames(self):
    """ returns a tuple of the names of the descriptors this calculator generates

    """
    return self.descriptorNames

  def GetDescriptorSummaries(self):
    """ returns a list of summaries (first docstring paragraph, or 'N/A')
      for the descriptors this calculator generates

    """
    res = []
    for nm in self.simpleList:
      fn = getattr(DescriptorsMod, nm, lambda x: 777)
      if hasattr(fn, '__doc__') and fn.__doc__:
        doc = fn.__doc__.split('\n\n')[0].strip()
        # collapse the paragraph's internal line breaks into single spaces
        doc = re.sub(r' *\n *', ' ', doc)
      else:
        doc = 'N/A'
      res.append(doc)
    return res

  def GetDescriptorFuncs(self):
    """ returns a tuple of the functions used to generate this calculator's descriptors

    """
    res = []
    for nm in self.simpleList:
      fn = getattr(DescriptorsMod, nm, lambda x: 777)
      res.append(fn)
    return tuple(res)

  def GetDescriptorVersions(self):
    """ returns a tuple of the versions of the descriptor calculators

    """
    return tuple(self.descriptorVersions)
| bp-kelley/rdkit | rdkit/ML/Descriptors/MoleculeDescriptors.py | Python | bsd-3-clause | 3,244 | [
"RDKit"
] | e0c364a6f8aee2ddb6dab638232748c953226bd700d013468bccd3f93e304f8f |
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import requests
from openbabel import pybel
from openbabel import openbabel as ob
# TODO: process Open Babel resdata.txt
# if we can find certain non-standard residues
mdLigands = [
"ASH", # Neutral ASP
"CYX", # SS-bonded CYS
"CYM", # Negative CYS
"GLH", # Neutral GLU
"HIP", # Positive HIS
"HID", # Neutral HIS, proton HD1 present
"HIE", # Neutral HIS, proton HE2 present
"LYN", # Neutral LYS
"TYM", # Negative TYR
]
# the location of the LigandExpo list by count
ligandURL = "http://ligand-expo.rcsb.org/dictionaries/cc-counts.tdd"
# URL for the ideal geometry
# e.g http://ligand-expo.rcsb.org/reports/H/HEM/HEM_ideal.pdb
sdfTemplate = "http://ligand-expo.rcsb.org/reports/{}/{}/{}_ideal.sdf"
# URL for the ideal geometry (PDB)
pdbTemplate = "http://ligand-expo.rcsb.org/reports/{}/{}/{}_ideal.pdb"
# save ligands with at least this # of occurrences
ligandThresh = 500
# default ligand list
ligands = [
# amino acids
"ALA", "CYS", "ASP", "GLU", "PHE", "GLY", "HIS", "ILE", "LYS", "LEU",
"MET", "ASN", "PRO", "GLN", "ARG", "SER", "THR", "VAL", "TRP", "TYR",
# DNA nucleic
"DA", "DC", "DG", "DT", "DI",
# RNA nucleic
"A", "C", "G", "U", "I",
# misc
"HEM", "HOH"
]
# okay, we build up the list of ligands to fetch
r = requests.get(ligandURL, stream=True)
for line in r.iter_lines(decode_unicode=True):
if 'count' in str(line):
continue # skip first line
name, count = line.split()
if (int(count) < ligandThresh):
# too rare, we'll skip the rest of the list
break
if str(name) not in ligands:
ligands.append(str(name))
print(
'''
#ifndef AVOGADRO_CORE_RESIDUE_DATA
#define AVOGADRO_CORE_RESIDUE_DATA
#include <map>
#include <string>
#include <vector>
namespace Avogadro {
namespace Core {
class ResidueData
{
private:
std::string m_residueName;
std::map<std::string, int> m_residueAtomNames;
std::vector<std::pair<std::string, std::string>> m_residueSingleBonds;
std::vector<std::pair<std::string, std::string>> m_residueDoubleBonds;
public:
ResidueData() {}
ResidueData(std::string name,
std::map<std::string, int> atomNames,
std::vector<std::pair<std::string, std::string>> singleBonds,
std::vector<std::pair<std::string, std::string>> doubleBonds)
{
m_residueName = name;
m_residueAtomNames = atomNames;
m_residueSingleBonds = singleBonds;
m_residueDoubleBonds = doubleBonds;
}
ResidueData(const ResidueData& other)
{
m_residueName = other.m_residueName;
m_residueAtomNames = other.m_residueAtomNames;
m_residueSingleBonds = other.m_residueSingleBonds;
m_residueDoubleBonds = other.m_residueDoubleBonds;
}
ResidueData& operator=(ResidueData other)
{
using std::swap;
swap(*this, other);
return *this;
}
std::map<std::string, int> residueAtoms() {
return m_residueAtomNames;
}
std::vector<std::pair<std::string, std::string>> residueSingleBonds()
{
return m_residueSingleBonds;
}
std::vector<std::pair<std::string, std::string>> residueDoubleBonds()
{
return m_residueDoubleBonds;
}
};
'''
)
# Process every candidate ligand: download its ideal geometry, map SDF atom
# indices to PDB atom IDs, collect bonds by order, and print one C++
# ResidueData definition per ligand.
final_ligands = []
for ligand in ligands:
    # ideal-geometry SDF gives us bond orders
    sdf = requests.get(sdfTemplate.format(ligand[0], ligand, ligand))
    # there *must* be a way to do this from a requests buffer, but this works
    with open('temp.sdf', 'wb') as handle:
        for block in sdf.iter_content(1024):
            handle.write(block)
    try:
        mol_sdf = next(pybel.readfile("sdf", 'temp.sdf'))
    except StopIteration:
        continue
    if len(mol_sdf.atoms) < 2:
        continue
    final_ligands.append(ligand)

    # ideal-geometry PDB gives us the residue atom IDs
    pdb = requests.get(pdbTemplate.format(ligand[0], ligand, ligand))
    with open('temp.pdb', 'wb') as handle:
        for block in pdb.iter_content(1024):
            handle.write(block)
    try:
        mol_pdb = next(pybel.readfile("pdb", 'temp.pdb'))
    except StopIteration:
        continue

    # build up a map between SDF atom index and (PDB atom ID, atomic number)
    atom_map = {}
    for i in range(len(mol_sdf.atoms)):
        idx = mol_sdf.atoms[i].idx
        atom = mol_pdb.atoms[i].OBAtom
        res = atom.GetResidue()
        atom_map[idx] = res.GetAtomID(atom).strip().rstrip(), atom.GetAtomicNum()

    # go through bonds, bucketing by bond order
    single_bonds = []
    double_bonds = []
    for bond in ob.OBMolBondIter(mol_sdf.OBMol):
        begin = bond.GetBeginAtomIdx()
        end = bond.GetEndAtomIdx()
        if bond.GetBondOrder() == 2:
            double_bonds.append((atom_map[begin][0], atom_map[end][0]))
        elif bond.GetBondOrder() == 1:
            single_bonds.append((atom_map[begin][0], atom_map[end][0]))

    # print out the residue data
    print('ResidueData %sData("%s",' % (ligand, ligand))
    print('// Atoms')
    print('{')
    atoms = list(atom_map.values())
    for atom in atoms[:-1]:
        print('{ "%s", %d },' % (atom[0], atom[1]), end='')
    # BUG FIX: print the *last* atom here. Previously this reused the loop
    # variable, which still held the second-to-last entry after iterating
    # over atoms[:-1] — so the final atom was dropped and the
    # second-to-last emitted twice.
    print('{"%s", %d }' % (atoms[-1][0], atoms[-1][1]))
    print('},')
    print('// Single Bonds')
    print('{')
    # BUG FIX: guard against molecules with no single bonds (e.g. all
    # double bonds); indexing single_bonds[-1] raised IndexError before.
    if single_bonds:
        for bond in single_bonds[:-1]:
            print('{ "%s", "%s" },' % bond, end='')
        print('{ "%s", "%s" }' % single_bonds[-1])
    print('},')
    print('// Double Bonds')
    print('{')
    if double_bonds:
        for bond in double_bonds[:-1]:
            print('{ "%s", "%s" },' % bond, end='')
        print('{ "%s", "%s" }' % double_bonds[-1])
    print('}')
    print(');')

# print the list of ligands as the dictionary that maps the residue name to
# its ResidueData definition, then close the namespaces and include guard
print('''std::map<std::string, ResidueData> residueDict = {''')
for ligand in final_ligands:
    print('{ "%s", %sData },' % (ligand, ligand))
print('''
};
}
}
#endif
'''
)
# clean up the temporary download files
os.remove("temp.sdf")
os.remove('temp.pdb')
| OpenChemistry/avogadrolibs | scripts/getresdata.py | Python | bsd-3-clause | 5,683 | [
"Avogadro",
"Open Babel",
"Pybel"
] | a59d5f9fe0bf09daba9a35dc62498b796cb45a2880dd324a89be1e67badb164d |
"""
Created August 14 2014
James Houghton <james.p.houghton@gmail.com>
Changed May 03 2017
Alexey Prey Mulyukin <alexprey@yandex.ru> from sdCloud.io developement team
Changes:
[May 03 2017] Alexey Prey Mulyukin: Integrate support to
logical operators like 'AND', 'OR' and 'NOT'.
Fix support the whitespaces in expressions between
operators and operands.
Add support to modulo operator - 'MOD'.
Fix support for case insensitive in function names.
This module converts a string of SMILE syntax into Python
"""
import parsimonious
from parsimonious.nodes import NodeVisitor
import pkg_resources
import re
from .. import builder, utils
# Here we define which python function each XMILE keyword corresponds to
functions = {
# ===
# 3.5.1 Mathematical Functions
# http://docs.oasis-open.org/xmile/xmile/v1.0/csprd01/xmile-v1.0-csprd01.html#_Toc398039980
# ===
"abs": "abs",
"int": "int",
"inf": {"name": "np.inf", "module": "numpy"},
"exp": {"name": "np.exp", "module": "numpy"},
"sin": {"name": "np.sin", "module": "numpy"},
"cos": {"name": "np.cos", "module": "numpy"},
"tan": {"name": "np.tan", "module": "numpy"},
"arcsin": {"name": "np.arcsin", "module": "numpy"},
"arccos": {"name": "np.arccos", "module": "numpy"},
"arctan": {"name": "np.arctan", "module": "numpy"},
"sqrt": {"name": "np.sqrt", "module": "numpy"},
"ln": {"name": "np.log", "module": "numpy"},
"log10": {"name": "np.log10", "module": "numpy"},
"max": "max",
"min": "min",
# ===
# 3.5.2 Statistical Functions
# http://docs.oasis-open.org/xmile/xmile/v1.0/csprd01/xmile-v1.0-csprd01.html#_Toc398039981
# ===
"exprnd": {"name": "np.random.exponential", "module": "numpy"},
"lognormal": {"name": "np.random.lognormal", "module": "numpy"},
"normal": {"name": "np.random.normal", "module": "numpy"},
"poisson": {"name": "np.random.poisson", "module": "numpy"},
"random": {"name": "np.random.rand", "module": "numpy"},
# ===
# 3.5.4 Test Input Functions
# http://docs.oasis-open.org/xmile/xmile/v1.0/csprd01/xmile-v1.0-csprd01.html#_Toc398039983
# ===
"pulse": {
"name": "pulse_magnitude",
"parameters": [
{"name": 'time', "type": "time"},
{"name": 'magnitude'},
{"name": 'start'},
{"name": "repeat_time", "optional": True}
],
"module": "functions"
},
"step": {
"name": "step",
"parameters": [
{"name": 'time', "type": 'time'},
{"name": 'value'},
{"name": 'tstep'}
],
"module": "functions"
},
# time, slope, start, finish=0
"ramp": {
"name": "ramp",
"parameters": [
{"name": 'time', "type": 'time'},
{"name": 'slope'},
{"name": 'start'},
{"name": 'finish', "optional": True}
],
"module": "functions"
},
# ===
# 3.5.6 Miscellaneous Functions
# http://docs.oasis-open.org/xmile/xmile/v1.0/csprd01/xmile-v1.0-csprd01.html#_Toc398039985
# ===
"if then else": {
"name": "if_then_else",
"parameters": [
{"name": 'condition'},
{"name": 'val_if_true', "type": 'lambda'},
{"name": 'val_if_false', "type": 'lambda'}
],
"module": "functions"
},
# TODO functions/stateful objects to be added
# https://github.com/JamesPHoughton/pysd/issues/154
"forecast": {"name": "not_implemented_function", "module": "functions",
"original_name": "forecast"},
"previous": {"name": "not_implemented_function", "module": "functions",
"original_name": "previous"},
"self": {"name": "not_implemented_function", "module": "functions",
"original_name": "self"}
}
prefix_operators = {
"not": " not ",
"-": "-",
"+": " ",
}
infix_operators = {
"and": " and ",
"or": " or ",
"=": "==",
"<=": "<=",
"<": "<",
">=": ">=",
">": ">",
"<>": "!=",
"^": "**",
"+": "+",
"-": "-",
"*": "*",
"/": "/",
"mod": "%",
}
# ====
# 3.5.3 Delay Functions
# http://docs.oasis-open.org/xmile/xmile/v1.0/csprd01/xmile-v1.0-csprd01.html#_Toc398039982
# ====
builders = {
# "delay" !TODO! How to add the infinity delay?
"delay1": lambda element, subscript_dict, args:
builder.add_n_delay(
identifier=element["py_name"],
delay_input=args[0],
delay_time=args[1],
initial_value=args[2] if len(args) > 2 else args[0],
order="1",
subs=element["subs"],
merge_subs=None,
deps=element["dependencies"]
),
"delay3": lambda element, subscript_dict, args:
builder.add_n_delay(
identifier=element["py_name"],
delay_input=args[0],
delay_time=args[1],
initial_value=args[2] if len(args) > 2 else args[0],
order="3",
subs=element["subs"],
merge_subs=None,
deps=element["dependencies"]
),
"delayn": lambda element, subscript_dict, args:
builder.add_n_delay(
identifier=element["py_name"],
delay_input=args[0],
delay_time=args[1],
initial_value=args[2] if len(args) > 3 else args[0],
order=args[2],
subs=element["subs"],
merge_subs=None,
deps=element["dependencies"]
),
"smth1": lambda element, subscript_dict, args:
builder.add_n_smooth(
identifier=element["py_name"],
smooth_input=args[0],
smooth_time=args[1],
initial_value=args[2] if len(args) > 2 else args[0],
order="1",
subs=element["subs"],
merge_subs=None,
deps=element["dependencies"]
),
"smth3": lambda element, subscript_dict, args:
builder.add_n_smooth(
identifier=element["py_name"],
smooth_input=args[0],
smooth_time=args[1],
initial_value=args[2] if len(args) > 2 else args[0],
order="3",
subs=element["subs"],
merge_subs=None,
deps=element["dependencies"]
),
"smthn": lambda element, subscript_dict, args:
builder.add_n_smooth(
identifier=element["py_name"],
smooth_input=args[0],
smooth_time=args[1],
initial_value=args[2] if len(args) > 3 else args[0],
order=args[2],
subs=element["subs"],
merge_subs=None,
deps=element["dependencies"]
),
# "forcst" !TODO!
"trend": lambda element, subscript_dict, args:
builder.add_n_trend(
identifier=element["py_name"],
trend_input=args[0],
average_time=args[1],
initial_trend=args[2] if len(args) > 2 else 0,
subs=element["subs"],
merge_subs=None,
deps=element["dependencies"]
),
"init": lambda element, subscript_dict, args:
builder.add_initial(
identifier=element["py_name"],
value=args[0],
deps=element["dependencies"]),
}
def format_word_list(word_list):
    """
    Build a regex alternation matching any of the given keywords.

    Longer keywords come first so that the regex engine prefers the
    longest match (e.g. ``<=`` is tried before ``<``), and every keyword
    is escaped so regex metacharacters are matched literally.
    """
    by_length = sorted(word_list, key=len)
    by_length.reverse()
    return '|'.join(re.escape(word) for word in by_length)
class SMILEParser(NodeVisitor):
    """
    Parse SMILE (the XMILE expression language) equations and translate
    them into Python expressions, using the module-level ``functions``,
    ``prefix_operators``, ``infix_operators`` and ``builders`` mappings.
    """

    def __init__(self, model_namespace=None, subscript_dict=None):
        """
        :param model_namespace: Mapping of model element names to their
            Python-safe identifiers.
        :type model_namespace: dict or None

        :param subscript_dict: Mapping of subscript family names to their
            elements.
        :type subscript_dict: dict or None
        """
        # Use None as the defaults instead of {}: mutable default
        # arguments are shared between all calls and are a classic
        # source of state leaking between parser instances.
        self.model_namespace = {} if model_namespace is None else model_namespace
        self.subscript_dict = {} if subscript_dict is None else subscript_dict

        # Also accept identifiers where spaces were replaced by underscores
        self.extended_model_namespace = {
            key.replace(' ', '_'): value
            for key, value in self.model_namespace.items()}
        self.extended_model_namespace.update(self.model_namespace)

        # ===
        # 3.5.5 Time Functions
        # http://docs.oasis-open.org/xmile/xmile/v1.0/csprd01/xmile-v1.0-csprd01.html#_Toc398039984
        # ===
        self.extended_model_namespace.update({'dt': 'time_step'})
        self.extended_model_namespace.update({'starttime': 'initial_time'})
        self.extended_model_namespace.update({'endtime': 'final_time'})

        # Fill the grammar template with the keyword alternations known
        # at construction time
        grammar = pkg_resources.resource_string(
            "pysd", "translation/xmile/smile.grammar")
        grammar = grammar.decode('ascii').format(
            funcs=format_word_list(functions.keys()),
            in_ops=format_word_list(infix_operators.keys()),
            pre_ops=format_word_list(prefix_operators.keys()),
            identifiers=format_word_list(self.extended_model_namespace.keys()),
            build_keywords=format_word_list(builders.keys())
        )

        self.grammar = parsimonious.Grammar(grammar)

    def parse(self, text, element, context='eqn'):
        """
        Parse a SMILE expression and return its Python translation.

        context : <string> 'eqn', 'defn'
            If context is set to equation, lone identifiers will be
            parsed as calls to elements. If context is set to definition,
            lone identifiers will be cleaned and returned.
        """
        # Remove the inline comments from `text` before parsing the grammar
        # http://docs.oasis-open.org/xmile/xmile/v1.0/csprd01/xmile-v1.0-csprd01.html#_Toc398039973
        text = re.sub(r"\{[^}]*\}", "", text)

        if "dependencies" not in element:
            element["dependencies"] = dict()

        self.ast = self.grammar.parse(text)
        self.context = context
        self.element = element
        self.new_structure = []
        py_expr = self.visit(self.ast)

        return ({
            'py_expr': py_expr
        }, self.new_structure)

    def visit_conditional_statement(self, n, vc):
        # Children at positions 2, 6, 10 are the condition and the two branches
        return builder.build_function_call(functions["if then else"], vc[2::4])

    def visit_user_call_identifier(self, n, vc):
        return self.extended_model_namespace[n.text]

    def visit_user_call_quoted_identifier(self, n, vc):
        return self.extended_model_namespace[vc[1]]

    def visit_identifier(self, n, vc):
        subelement = self.extended_model_namespace[n.text]
        utils.update_dependency(subelement, self.element["dependencies"])
        return subelement + '()'

    def visit_quoted_identifier(self, n, vc):
        subelement = self.extended_model_namespace[vc[1]]
        utils.update_dependency(subelement, self.element["dependencies"])
        return subelement + '()'

    def visit_call(self, n, vc):
        # Function names are matched case-insensitively
        function_name = vc[0].lower()
        arguments = [e.strip() for e in vc[4].split(",")]
        return builder.build_function_call(
            functions[function_name], arguments, self.element["dependencies"])

    def visit_user_call(self, n, vc):
        return vc[0] + '(' + vc[4] + ')'

    def visit_build_call(self, n, vc):
        builder_name = vc[0].lower()
        arguments = [e.strip() for e in vc[4].split(",")]
        name, structure = builders[builder_name](
            self.element, self.subscript_dict, arguments)
        self.new_structure += structure
        # The expression now depends only on the stateful object just built
        self.element["dependencies"] = {structure[-1]["py_name"]: 1}
        return name

    def visit_pre_oper(self, n, vc):
        return prefix_operators[n.text.lower()]

    def visit_in_oper(self, n, vc):
        return infix_operators[n.text.lower()]

    def generic_visit(self, n, vc):
        """
        Replace childbearing nodes with a list of their children;
        for leaves, return the node text;
        for empty nodes, return an empty string.

        Handles:
        - call
        - parens
        -
        """
        return ''.join(filter(None, vc)) or n.text or ''
| JamesPHoughton/pysd | pysd/translation/xmile/SMILE2Py.py | Python | mit | 11,669 | [
"VisIt"
] | 668bd9329f5aae494d98f182e4cf072886f44871b6e61b2b2af6d94c6f354823 |
#!/usr/bin/env python
import unittest
import numpy as np
from pymatgen.core.lattice import Lattice
from pymatgen.symmetry.maggroups import MagneticSpaceGroup
from pymatgen.symmetry.groups import SpaceGroup
from pymatgen.util.testing import PymatgenTest
__author__ = "Matthew Horton"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "mkhorton@lbl.gov"
__status__ = "Beta"
__date__ = "Feb 2017"
class MagneticSpaceGroupTest(PymatgenTest):
def setUp(self):
self.msg_1 = MagneticSpaceGroup([70, 530])
self.msg_2 = MagneticSpaceGroup([62, 448])
self.msg_3 = MagneticSpaceGroup([20, 37])
def test_init(self):
# test init with the following space group:
# 71.538 (BNS number), I_cmmm (BNS label)
# 65.10.554 (same space group as above, OG number), C_Immm (OG label)
msg_from_bns_1 = MagneticSpaceGroup("I_cmmm")
msg_from_bns_2 = MagneticSpaceGroup([71, 538])
msg_from_og_1 = MagneticSpaceGroup.from_og("C_Immm")
msg_from_og_2 = MagneticSpaceGroup.from_og([65, 10, 554])
self.assertEqual(msg_from_bns_1, msg_from_bns_2)
self.assertEqual(msg_from_og_1, msg_from_og_2)
self.assertEqual(msg_from_bns_1, msg_from_og_1)
def test_crystal_system(self):
self.assertEqual(self.msg_1.crystal_system, "orthorhombic")
self.assertEqual(self.msg_2.crystal_system, "orthorhombic")
self.assertEqual(self.msg_3.crystal_system, "orthorhombic")
def test_sg_symbol(self):
self.assertEqual(self.msg_1.sg_symbol, "Fd'd'd")
self.assertEqual(self.msg_2.sg_symbol, "Pn'ma'")
self.assertEqual(self.msg_3.sg_symbol, "C_A222_1")
def test_is_compatible(self):
cubic = Lattice.cubic(1)
hexagonal = Lattice.hexagonal(1, 2)
rhom = Lattice.rhombohedral(3, 80)
tet = Lattice.tetragonal(1, 2)
ortho = Lattice.orthorhombic(1, 2, 3)
msg = MagneticSpaceGroup("Fm-3m")
self.assertTrue(msg.is_compatible(cubic))
self.assertFalse(msg.is_compatible(hexagonal))
msg = MagneticSpaceGroup("Pnma")
self.assertTrue(msg.is_compatible(cubic))
self.assertTrue(msg.is_compatible(tet))
self.assertTrue(msg.is_compatible(ortho))
self.assertFalse(msg.is_compatible(rhom))
self.assertFalse(msg.is_compatible(hexagonal))
msg = MagneticSpaceGroup("P2/c")
self.assertTrue(msg.is_compatible(cubic))
self.assertTrue(msg.is_compatible(tet))
self.assertTrue(msg.is_compatible(ortho))
self.assertFalse(msg.is_compatible(rhom))
self.assertFalse(msg.is_compatible(hexagonal))
msg = MagneticSpaceGroup("P-1")
self.assertTrue(msg.is_compatible(cubic))
self.assertTrue(msg.is_compatible(tet))
self.assertTrue(msg.is_compatible(ortho))
self.assertTrue(msg.is_compatible(rhom))
self.assertTrue(msg.is_compatible(hexagonal))
def test_symmetry_ops(self):
msg_1_symmops = "\n".join([str(op) for op in self.msg_1.symmetry_ops])
msg_1_symmops_ref = """x, y, z, +1
-x+3/4, -y+3/4, z, +1
-x, -y, -z, +1
x+1/4, y+1/4, -z, +1
x, -y+3/4, -z+3/4, -1
-x+3/4, y, -z+3/4, -1
-x, y+1/4, z+1/4, -1
x+1/4, -y, z+1/4, -1
x, y+1/2, z+1/2, +1
-x+3/4, -y+5/4, z+1/2, +1
-x, -y+1/2, -z+1/2, +1
x+1/4, y+3/4, -z+1/2, +1
x, -y+5/4, -z+5/4, -1
-x+3/4, y+1/2, -z+5/4, -1
-x, y+3/4, z+3/4, -1
x+1/4, -y+1/2, z+3/4, -1
x+1/2, y, z+1/2, +1
-x+5/4, -y+3/4, z+1/2, +1
-x+1/2, -y, -z+1/2, +1
x+3/4, y+1/4, -z+1/2, +1
x+1/2, -y+3/4, -z+5/4, -1
-x+5/4, y, -z+5/4, -1
-x+1/2, y+1/4, z+3/4, -1
x+3/4, -y, z+3/4, -1
x+1/2, y+1/2, z, +1
-x+5/4, -y+5/4, z, +1
-x+1/2, -y+1/2, -z, +1
x+3/4, y+3/4, -z, +1
x+1/2, -y+5/4, -z+3/4, -1
-x+5/4, y+1/2, -z+3/4, -1
-x+1/2, y+3/4, z+1/4, -1
x+3/4, -y+1/2, z+1/4, -1"""
msg_2_symmops = "\n".join([str(op) for op in self.msg_2.symmetry_ops])
msg_2_symmops_ref = """x, y, z, +1
-x, y+1/2, -z, +1
-x, -y, -z, +1
x, -y+1/2, z, +1
x+1/2, -y+1/2, -z+1/2, -1
-x+1/2, -y, z+1/2, -1
-x+1/2, y+1/2, z+1/2, -1
x+1/2, y, -z+1/2, -1"""
self.assertStrContentEqual(msg_2_symmops, msg_2_symmops_ref)
msg_3_symmops = "\n".join([str(op) for op in self.msg_3.symmetry_ops])
msg_3_symmops_ref = """x, y, z, +1
x, -y, -z, +1
-x, y, -z+1/2, +1
-x, -y, z+1/2, +1
x, y+1/2, z+1/2, -1
x+1/2, -y, -z+1/2, -1
-x+1/2, y, -z, -1
-x+1/2, -y, z, -1
x+1/2, y+1/2, z, +1
x+1/2, -y+1/2, -z, +1
-x+1/2, y+1/2, -z+1/2, +1
-x+1/2, -y+1/2, z+1/2, +1
x+1/2, y+1, z+1/2, -1
x+1, -y+1/2, -z+1/2, -1
-x+1, y+1/2, -z, -1
-x+1, -y+1/2, z, -1"""
self.assertEqual(msg_3_symmops, msg_3_symmops_ref)
def test_equivalence_to_spacegroup(self):
# first 230 magnetic space groups have same symmetry operations
# as normal space groups, so should give same orbits
labels = ["Fm-3m", "Pnma", "P2/c", "P-1"]
points = [[0, 0, 0],
[0.5, 0, 0],
[0.11, 0.22, 0.33]]
for label in labels:
sg = SpaceGroup(label)
msg = MagneticSpaceGroup(label)
self.assertEqual(sg.crystal_system, msg.crystal_system)
for p in points:
pp_sg = np.array(sg.get_orbit(p))
pp_msg = np.array(msg.get_orbit(p, 0)[0]) # discarding magnetic moment information
pp_sg = pp_sg[np.lexsort(np.transpose(pp_sg)[::-1])] # sorting arrays so we can compare them
pp_msg = pp_msg[np.lexsort(np.transpose(pp_msg)[::-1])]
self.assertTrue(np.allclose(pp_sg, pp_msg))
def test_str(self):
msg = MagneticSpaceGroup([4, 11])
ref_string = """BNS: 4.11 P_b2_1
Operators: (1|0,0,0) (2y|0,1/2,0) (1|0,1/2,0)' (2y|0,0,0)'
Wyckoff Positions:
4e (x,y,z;mx,my,mz) (-x,y+1/2,-z;-mx,my,-mz) (x,y+1/2,z;-mx,-my,-mz)
(-x,y,-z;mx,-my,mz)
2d (1/2,y,1/2;mx,0,mz) (1/2,y+1/2,1/2;-mx,0,-mz)
2c (1/2,y,0;mx,0,mz) (1/2,y+1/2,0;-mx,0,-mz)
2b (0,y,1/2;mx,0,mz) (0,y+1/2,1/2;-mx,0,-mz)
2a (0,y,0;mx,0,mz) (0,y+1/2,0;-mx,0,-mz)
Alternative OG setting exists for this space group."""
ref_string_all = """BNS: 4.11 P_b2_1 OG: 3.7.14 P_2b2'
OG-BNS Transform: (a,2b,c;0,0,0)
Operators (BNS): (1|0,0,0) (2y|0,1/2,0) (1|0,1/2,0)' (2y|0,0,0)'
Wyckoff Positions (BNS):
4e (x,y,z;mx,my,mz) (-x,y+1/2,-z;-mx,my,-mz) (x,y+1/2,z;-mx,-my,-mz)
(-x,y,-z;mx,-my,mz)
2d (1/2,y,1/2;mx,0,mz) (1/2,y+1/2,1/2;-mx,0,-mz)
2c (1/2,y,0;mx,0,mz) (1/2,y+1/2,0;-mx,0,-mz)
2b (0,y,1/2;mx,0,mz) (0,y+1/2,1/2;-mx,0,-mz)
2a (0,y,0;mx,0,mz) (0,y+1/2,0;-mx,0,-mz)
Operators (OG): (1|0,0,0) (2y|0,1,0) (1|0,1,0)' (2y|0,0,0)'
Wyckoff Positions (OG): (1,0,0)+ (0,2,0)+ (0,0,1)+
4e (x,y,z;mx,my,mz) (-x,y+1,-z;-mx,my,-mz) (x,y+1,z;-mx,-my,-mz)
(-x,y,-z;mx,-my,mz)
2d (1/2,y,1/2;mx,0,mz) (-1/2,y+1,-1/2;-mx,0,-mz)
2c (1/2,y,0;mx,0,mz) (-1/2,y+1,0;-mx,0,-mz)
2b (0,y,1/2;mx,0,mz) (0,y+1,-1/2;-mx,0,-mz)
2a (0,y,0;mx,0,mz) (0,y+1,0;-mx,0,-mz)"""
self.assertStrContentEqual(str(msg), ref_string)
self.assertStrContentEqual(msg.data_str(), ref_string_all)
if __name__ == '__main__':
unittest.main()
| tschaume/pymatgen | pymatgen/symmetry/tests/test_maggroups.py | Python | mit | 7,182 | [
"pymatgen"
] | 1a644aac36db8b74107348a68c7f5db349c83120fec12298ab33364385a7de8a |
"""
# Notes:
- This simulation seeks to emulate the COBAHH benchmark simulations of (Brette
et al. 2007) using the Brian2 simulator for speed benchmark comparison to
DynaSim. However, this simulation does NOT include synapses, for better
comparison to Figure 5 of (Goodman and Brette, 2008) - although it uses the
COBAHH model of (Brette et al. 2007), not CUBA.
- The time taken to simulate will be indicated in the stdout log file
'~/batchdirs/brian_benchmark_COBAHH_nosyn_128/pbsout/brian_benchmark_COBAHH_nosyn_128.out'
- Note that this code has been slightly modified from the original (Brette et
al. 2007) benchmarking code, available here on ModelDB:
https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=83319
in order to work with version 2 of the Brian simulator (aka Brian2), and also
modified to change the model being benchmarked, etc.
# References:
- Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al.
Simulation of networks of spiking neurons: A review of tools and strategies.
Journal of Computational Neuroscience 2007;23:349–98.
doi:10.1007/s10827-007-0038-6.
- Goodman D, Brette R. Brian: a simulator for spiking neural networks in Python.
Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008.
"""
from brian2 import *
# Parameters
cells = 128
defaultclock.dt = 0.01*ms
area = 20000*umetre**2
Cm = (1*ufarad*cmetre**-2) * area
gl = (5e-5*siemens*cmetre**-2) * area
El = -60*mV
EK = -90*mV
ENa = 50*mV
g_na = (100*msiemens*cmetre**-2) * area
g_kd = (30*msiemens*cmetre**-2) * area
VT = -63*mV
# # Time constants
# taue = 5*ms
# taui = 10*ms
# # Reversal potentials
# Ee = 0*mV
# Ei = -80*mV
# we = 6*nS # excitatory synaptic weight
# wi = 67*nS # inhibitory synaptic weight
# The model
eqs = Equations('''
dv/dt = (gl*(El-v)-
g_na*(m*m*m)*h*(v-ENa)-
g_kd*(n*n*n*n)*(v-EK))/Cm : volt
dm/dt = alpha_m*(1-m)-beta_m*m : 1
dn/dt = alpha_n*(1-n)-beta_n*n : 1
dh/dt = alpha_h*(1-h)-beta_h*h : 1
alpha_m = 0.32*(mV**-1)*(13*mV-v+VT)/
(exp((13*mV-v+VT)/(4*mV))-1.)/ms : Hz
beta_m = 0.28*(mV**-1)*(v-VT-40*mV)/
(exp((v-VT-40*mV)/(5*mV))-1)/ms : Hz
alpha_h = 0.128*exp((17*mV-v+VT)/(18*mV))/ms : Hz
beta_h = 4./(1+exp((40*mV-v+VT)/(5*mV)))/ms : Hz
alpha_n = 0.032*(mV**-1)*(15*mV-v+VT)/
(exp((15*mV-v+VT)/(5*mV))-1.)/ms : Hz
beta_n = .5*exp((10*mV-v+VT)/(40*mV))/ms : Hz
''')
# dv/dt = (gl*(El-v)+ge*(Ee-v)+gi*(Ei-v)-
# dge/dt = -ge*(1./taue) : siemens
# dgi/dt = -gi*(1./taui) : siemens
P = NeuronGroup(cells, model=eqs, threshold='v>-20*mV', refractory=3*ms,
method='euler')
proportion=int(0.8*cells)
Pe = P[:proportion]
Pi = P[proportion:]
# Ce = Synapses(Pe, P, on_pre='ge+=we')
# Ci = Synapses(Pi, P, on_pre='gi+=wi')
# Ce.connect(p=0.98)
# Ci.connect(p=0.98)
# Initialization
P.v = 'El + (randn() * 5 - 5)*mV'
# P.ge = '(randn() * 1.5 + 4) * 10.*nS'
# P.gi = '(randn() * 12 + 20) * 10.*nS'
# Record a few traces
trace = StateMonitor(P, 'v', record=[1, 10, 100])
totaldata = StateMonitor(P, 'v', record=True)
run(0.5 * second, report='text')
# plot(trace.t/ms, trace[1].v/mV)
# plot(trace.t/ms, trace[10].v/mV)
# plot(trace.t/ms, trace[100].v/mV)
# xlabel('t (ms)')
# ylabel('v (mV)')
# show()
# print("Saving TC cell voltages!")
# numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
| asoplata/dynasim-benchmark-brette-2007 | output/Brian2/brian2_benchmark_COBAHH_nosyn_0128/brian2_benchmark_COBAHH_nosyn_0128.py | Python | gpl-3.0 | 3,347 | [
"Brian"
] | c403baff7f3633de7b1d9d16d5b27eaf7ff124a07f73a5b93ee5e94fd172a7cb |
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Functions Analysis Module """
import json
import os
from operator import itemgetter, attrgetter
from statistics import mean
from functools import reduce
from itertools import chain
from copy import copy
from collections.abc import Mapping
from enum import IntEnum
import pandas as pd
from pandas.api.types import is_numeric_dtype
import numpy as np
from lisa.utils import groupby, memoized, FrozenDict, unzip_into
from lisa.datautils import df_merge
from lisa.analysis.base import TraceAnalysisBase, AnalysisHelpers
from lisa.analysis.load_tracking import LoadTrackingAnalysis
from lisa.trace import requires_events, requires_one_event_of, MissingTraceEventError
from lisa.conf import ConfigKeyError
from lisa.stats import Stats
from lisa.pelt import PELT_SCALE
class FunctionsAnalysis(TraceAnalysisBase):
"""
Support for ftrace events-based kernel functions profiling and analysis
"""
name = 'functions'
    def df_resolve_ksym(self, df, addr_col, name_col='func_name', addr_map=None, exact=True):
        """
        Resolve the kernel function names.

        .. note:: If the ``addr_col`` is not of a numeric dtype, it will be
            assumed to be function names already and the content will be copied
            to ``name_col``.

        :param df: Dataframe to augment
        :type df: pandas.DataFrame

        :param addr_col: Name of the column containing a kernel address.
        :type addr_col: str

        :param name_col: Name of the column to create with symbol names
        :type name_col: str

        :param addr_map: If provided, the mapping of kernel addresses to symbol
            names. If missing, the symbols addresses from the
            :class:`lisa.platforms.platinfo.PlatformInfo` attached to the trace
            will be used.
        :type addr_map: dict(int, str)

        :param exact: If ``True``, an exact symbol address is expected. If
            ``False``, symbol addresses are sorted and paired to form
            intervals, which are then used to infer the name. This is suited to
            resolve an instruction pointer that could point anywhere inside of
            a function (but before the starting address of the next function).
        :type exact: bool
        """
        trace = self.trace
        # Shallow copy: we only add/replace columns, the column data is shared
        df = df.copy(deep=False)

        # Names already resolved, we can just copy the address column to the
        # name one
        if not is_numeric_dtype(df[addr_col].dtype):
            df[name_col] = df[addr_col]
            return df

        if addr_map is None:
            addr_map = trace.plat_info['kernel']['symbols-address']

        if exact:
            df[name_col] = df[addr_col].map(addr_map)
        # Not exact means the function addresses will be used as ranges, so
        # we can find in which function any instruction point value is
        else:
            # Sort by address, so that each consecutive pair of address
            # constitue a range of address belonging to a given function.
            addr_list = sorted(
                addr_map.items(),
                key=itemgetter(0)
            )
            bins, labels = zip(*addr_list)
            # "close" the last bucket with the highest value possible of that column
            max_addr = np.iinfo(df[addr_col].dtype).max
            bins = list(bins) + [max_addr]
            name_i = pd.cut(
                df[addr_col],
                bins=bins,
                # Since our labels are not unique, we cannot pass it here
                # directly. Instead, use an index into the labels list
                labels=range(len(labels)),
                # Include the left boundary and exclude the right one
                include_lowest=True,
                right=False,
            )
            # Map each interval index back to the corresponding symbol name
            df[name_col] = name_i.apply(lambda x: labels[x])

        return df
def _df_with_ksym(self, event, *args, **kwargs):
df = self.trace.df_event(event)
try:
return self.df_resolve_ksym(df, *args, **kwargs)
except ConfigKeyError:
self.get_logger().warning(f'Missing symbol addresses, function names will not be resolved: {e}')
return df
@requires_one_event_of('funcgraph_entry', 'funcgraph_exit')
@TraceAnalysisBase.cache
def df_funcgraph(self, event):
"""
Return augmented dataframe of the event with the following column:
* ``func_name``: Name of the calling function if it could be
resolved.
:param event: One of:
* ``entry`` (``funcgraph_entry`` event)
* ``exit`` (``funcgraph_exit`` event)
:type event: str
"""
event = f'funcgraph_{event}'
return self._df_with_ksym(event, 'func', 'func_name', exact=False)
    @df_funcgraph.used_events
    @LoadTrackingAnalysis.df_cpus_signal.used_events
    def _get_callgraph(self, tag_df=None, thread_root_functions=None):
        """
        Build a :class:`_CallGraph` by merging the funcgraph entry/exit
        events, the CPU capacity signal and the optional tag events into a
        single event stream.
        """
        entry_df = self.df_funcgraph(event='entry').copy(deep=False)
        entry_df['event'] = _CallGraph._EVENT.ENTRY

        exit_df = self.df_funcgraph(event='exit').copy(deep=False)
        exit_df['event'] = _CallGraph._EVENT.EXIT

        # Attempt to get the CPU capacity signal to normalize the results
        capacity_cols = ['__cpu', 'event', 'capacity']
        try:
            capacity_df = self.trace.analysis.load_tracking.df_cpus_signal('capacity')
        except MissingTraceEventError:
            # Fall back on an empty (but well-formed) capacity dataframe
            capacity_df = pd.DataFrame(columns=capacity_cols)
        else:
            capacity_df = capacity_df.copy(deep=False)
            capacity_df['__cpu'] = capacity_df['cpu']
            capacity_df['event'] = _CallGraph._EVENT.SET_CAPACITY
            capacity_df = capacity_df[capacity_cols]

        # Set a reasonable initial capacity
        try:
            orig_capacities = self.trace.plat_info['cpu-capacities']['orig']
        except KeyError:
            pass
        else:
            # Negative timestamps (-1 * cpu) guarantee these initial events
            # sort before any real trace event
            orig_capacities_df = pd.DataFrame.from_records(
                (
                    (-1 * cpu, cpu, _CallGraph._EVENT.SET_CAPACITY, cap)
                    for cpu, cap in orig_capacities.items()
                ),
                columns=['Time', '__cpu', 'event', 'capacity'],
                index='Time',
            )
            capacity_df = pd.concat((orig_capacities_df, capacity_df))

        to_merge = [entry_df, exit_df, capacity_df]

        if tag_df is not None:
            cpu = tag_df['__cpu']
            tag_df = tag_df.drop(columns=['__cpu'])
            # Collapse all remaining columns into a single "tags" dict column
            tag_df = pd.DataFrame(dict(
                tags=tag_df.apply(pd.Series.to_dict, axis=1),
                __cpu=cpu,
            ))
            tag_df['event'] = _CallGraph._EVENT.SET_TAG
            to_merge.append(tag_df)

        df = df_merge(to_merge)
        return _CallGraph.from_df(
            df,
            thread_root_functions=thread_root_functions
        )
    @_get_callgraph.used_events
    def df_calls(self, tag_df=None, thread_root_functions=None, normalize=True):
        """
        Return a :class:`pandas.DataFrame` with a row for each function call,
        along some metrics:

            * ``cum_time``: cumulative time spent in that function. This
              includes the time spent in all children too.

            * ``self_time``: time spent in that function only. This
              excludes the time spent in all children.

        :param tag_df: Dataframe containing the tag event, which is used to tag
            paths in the callgraph. The ``__cpu`` column is mandatory in order
            to know which CPU is to be tagged at any index. Other columns will
            be used as tag keys. Tags are inherited from both parents and
            children. This allows a leaf function to emit an event and use it
            for the whole path that lead to there. Equally, if a function emits
            a tag, all the children of this call will inherit the tag too. This
            allows a top-level function to tag a whole subtree at once.
        :type tag_df: pandas.DataFrame

        :param thread_root_functions: Functions that are considered to be a
            root of threads. When they appear in the callgraph, the profiler
            will consider the current function to be preempted and will not
            register the call as a child of it and will avoid to count it in
            the cumulative time.
        :type thread_root_functions: list(str) or None

        :param normalize: Normalize metrics according to the current CPU
            capacity so that they appear to have run on the fastest CPU at
            maximum frequency. This allows merging calls regardless of their
            origin (CPU and frequency).

            .. note:: Normalization only currently takes into account the
                capacity of the CPU when the function is entered. If it changes
                during execution, the result will be somewhat wrong.
        :type normalize: bool

        .. note:: Calls during which the current function name changes are not
            accounted for. They are typically a sign of functions that did not
            properly return, for example functions triggering a context switch
            and returning to userspace.
        """
        graph = self._get_callgraph(
            tag_df=tag_df,
            thread_root_functions=thread_root_functions,
        )
        metrics = _CallGraphNode._METRICS

        def get_metric(node, metric):
            # Scale by the capacity the CPU had when the function was entered,
            # so the value reads as if run on the fastest CPU at max frequency
            val = node[metric]
            if normalize:
                return (node.cpu_capacity / PELT_SCALE) * val
            else:
                return val

        return pd.DataFrame.from_records(
            (
                (
                    node.entry_time, node.cpu, node.func_name, FrozenDict(node.tags), node.tagged_name,
                    *(
                        get_metric(node, metric)
                        for metric in metrics
                    )
                )
                for node in graph.all_nodes
            ),
            columns=['Time', 'cpu', 'function', 'tags', 'tagged_name'] + metrics,
            index='Time',
        )
def compare_with_traces(self, others, normalize=True, **kwargs):
"""
Compare the :class:`~lisa.trace.Trace` it's called on with the other
traces passed as ``others``. The reference is the trace it's called on.
:returns: a :class:`lisa.stats.Stats` object just like
:meth:`profile_stats`.
:param others: List of traces to compare against.
:type others: list(lisa.trace.Trace)
:Variable keyword arguments: Forwarded to :meth:`profile_stats`.
"""
ref = self.trace
traces = [ref] + list(others)
paths = [
trace.trace_path
for trace in traces
]
common_prefix_len = len(os.path.commonprefix(paths))
common_suffix_len = len(os.path.commonprefix(list(map(lambda x: str(reversed(x)), paths))))
def get_name(trace):
name = trace.trace_path[common_prefix_len:common_suffix_len]
if not name:
if trace is ref:
name = 'ref'
else:
name = str(traces.index(trace))
return name
def get_df(trace):
df = trace.analysis.functions.df_calls(normalize=normalize)
df = df.copy(deep=False)
df['trace'] = get_name(trace)
return df
df = df_merge(map(get_df, traces))
ref_group = {
'trace': get_name(ref)
}
return self._profile_stats_from_df(df, ref_group=ref_group, **kwargs)
    @df_calls.used_events
    def profile_stats(self, tag_df=None, normalize=True, ref_function=None, ref_tags=None, **kwargs):
        """
        Create a :class:`lisa.stats.Stats` out of profiling information of the
        trace.

        :param tag_df: Dataframe of tags, forwarded to :meth:`df_calls`
        :type tag_df: pandas.DataFrame or None

        :param normalize: Normalize execution time according to CPU capacity,
            forwarded to :meth:`df_calls`
        :type normalize: bool

        :param ref_function: Function to compare to.
        :type ref_function: str or None

        :param ref_tags: Function tags to compare to. Ignored if ``ref_function
            is None``.
        :type ref_tags: dict(str, set(object)) or None

        :Variable keyword arguments: Forwarded to
            :meth:`_profile_stats_from_df` (``metric``, ``functions``,
            ``cpus``, ``per_cpu``, ``tags``) and ultimately to
            :class:`lisa.stats.Stats`.

        .. note:: Recursive calls are treated as if they were inlined in their
            callers. This means that the count of calls will be counting the
            toplevel calls only, and that the ``self_time`` for a recursive
            function is directly linked to how much time each level consumes
            multiplied by the number of levels. ``cum_time`` will also be
            tracked on the top-level call only to provide a more accurate
            result.
        """
        df = self.df_calls(tag_df=tag_df, normalize=normalize)

        if ref_function:
            ref_tags = ref_tags or {}
            # Use the tagged display name so the reference matches the 'f'
            # column built by _profile_stats_from_df()
            ref_group = {
                'f': _CallGraphNode.format_name(ref_function, ref_tags)
            }
        else:
            ref_group = None

        return self._profile_stats_from_df(df, ref_group=ref_group, **kwargs)
    @staticmethod
    def _profile_stats_from_df(df, metric='self_time', functions=None, per_cpu=True, cpus=None, tags=None, **kwargs):
        """
        Build a :class:`lisa.stats.Stats` from a per-call dataframe as
        produced by :meth:`df_calls`, after filtering by function name,
        CPU and tags.
        """
        metrics = _CallGraphNode._METRICS
        # Get rid of the other value columns to avoid treating them as
        # tags
        other_metrics = set(metrics) - {metric}

        if functions:
            df = df[df['function'].isin(functions)]

        if cpus is not None:
            df = df[df['cpu'].isin(cpus)]

        if tags:
            # Select all rows that are a subset of the given tags
            def select_tag(row_tags):
                return all(
                    val in row_tags.get(tag, [])
                    for tag, val in tags.items()
                )
            df = df[df['tags'].apply(select_tag)]

        # Shallow copy before adding/dropping columns
        df = df.copy(deep=False)
        # Use tagged_name for display
        df['f'] = df['tagged_name']

        to_drop = list(other_metrics) + ['tags', 'function', 'tagged_name']
        # Calls are already uniquely identified by their timestamp, so grouping
        # per CPU is optional
        if not per_cpu:
            to_drop.append('cpu')
        df = df.drop(columns=to_drop)
        df['unit'] = 's'

        index_name = df.index.name
        df = df.reset_index()
        return Stats(
            df,
            agg_cols=[index_name],
            value_col=metric,
            **kwargs,
        )
class _CallGraph:
    """
    Per-CPU forest of :class:`_CallGraphNode` call trees, built from a
    dataframe of profiling events by :meth:`from_df`.
    """
    class _EVENT(IntEnum):
        """
        To be used as events for the dataframe passed to
        :meth:`from_df`.
        """
        ENTRY = 1
        """Enter the given function"""
        EXIT = 2
        """Exit the given function"""
        SET_TAG = 3
        """
        Tag the current call graph path (parents and
        children) with the given value
        """
        SET_CAPACITY = 4
        """
        Set the capacity of the current CPU. Values are between 0 and
        :attr:`lisa.pelt.PELT_SCALE`.
        """
    def __init__(self, cpu_nodes):
        # Mapping of CPU number to the root _CallGraphNode of that CPU's tree
        self.cpu_nodes = cpu_nodes
    @property
    def all_nodes(self):
        """Iterator over every node of every per-CPU call tree."""
        return chain.from_iterable(
            node.indirect_children
            for node in self.cpu_nodes.values()
        )
    @classmethod
    def from_df(cls, df, thread_root_functions=None, ts_cols=('calltime', 'rettime')):
        """
        Build a :class:`_CallGraph` from a :class:`pandas.DataFrame` with the
        following columns:
        * ``event``: One of :class:`_CallGraph._EVENT` enumeration.
        * ``func_name``: Name of the function for ``entry`` and ``exit``
        events.
        * ``tags``: ``dict(str, object)`` of tags for ``tag`` event.
        :param thread_root_functions: Functions that are considered to be a
        root of threads. When they appear in the callgraph, the profiler
        will consider the current function to be preempted and will not
        register the call as a child of it and will avoid to count it in
        the cumulative time.
        :type thread_root_functions: list(str) or None
        :param ts_cols: Name of the columns for the
        :attr:`_CallGraph._EVENT.EXIT` rows that contain timestamps for
        entry and exit. If they are provided, they will be used instead of
        the index.
        :type ts_cols: tuple(str) or None
        """
        thread_root_functions = set(thread_root_functions) if thread_root_functions else set()
        def make_visitor():
            # Per-CPU state machine. Returns (root node, row visitor,
            # finalizer) sharing closure state: the current node, the
            # current CPU capacity and a logical-thread ID allocator.
            _max_thread = -1
            def make_thread():
                # Allocate a new monotonically-increasing logical thread ID
                nonlocal _max_thread
                _max_thread += 1
                return _max_thread
            root_node = _CallGraphNode(
                func_name=None,
                parent=None,
                cpu=None,
                cpu_capacity=None,
                logical_thread=make_thread(),
            )
            curr_node = root_node
            # This is expected to be overriden right away by a SET_CAPACITY
            # event
            curr_capacity = PELT_SCALE
            event_enum = cls._EVENT
            def visit(row):
                # Process one event row, updating the tree in place
                nonlocal curr_node, curr_capacity
                curr_event = row['event']
                if curr_event == event_enum.ENTRY:
                    func_name = row['func_name']
                    cpu = row['__cpu']
                    # If we got preempted by a function that is considered to
                    # be part of different logical thread (e.g. the toplevel
                    # function of an ISR), create a new ID
                    if func_name in thread_root_functions:
                        logical_thread = make_thread()
                    # Otherwise, just inherit it from the parent
                    else:
                        logical_thread = curr_node.logical_thread
                    child = _CallGraphNode(
                        func_name=func_name,
                        cpu=cpu,
                        parent=curr_node,
                        cpu_capacity=curr_capacity,
                        entry_time=row.name,
                        logical_thread=logical_thread,
                    )
                    curr_node._children.append(child)
                    curr_node = child
                elif curr_event == event_enum.EXIT:
                    # We are trying to exit the root, which is probably the sign of
                    # a missing entry event (could have been cropped out of the
                    # trace). We therefore just ignore it.
                    if curr_node is not root_node:
                        # That node is unusable for stats, since the function
                        # used to enter the call is not the same one as for the
                        # exit. This usually means that the kernel returned to
                        # userspace in between.
                        if row['func_name'] != curr_node.func_name:
                            curr_node.valid_metrics = False
                        if ts_cols is None:
                            curr_node.exit_time = row.name
                        else:
                            entry_ts, exit_ts = ts_cols
                            # Event timestamps are scaled by 1e-9
                            # (presumably nanoseconds -> seconds)
                            curr_node.entry_time = row[entry_ts] * 1e-9
                            curr_node.exit_time = row[exit_ts] * 1e-9
                        curr_node = curr_node.parent
                elif curr_event == event_enum.SET_TAG:
                    tags = row['tags']
                    curr_node.set_tags(tags)
                elif curr_event == event_enum.SET_CAPACITY:
                    curr_capacity = row['capacity']
                else:
                    raise ValueError(f'Unknown event "{curr_event}"')
            def finalize(df):
                # Fixup the exit time if there were missing exit events
                if curr_node is not root_node:
                    last_time = df.index[-1]
                    for node in chain([curr_node], curr_node.parents):
                        node.exit_time = last_time
                        node.valid_metrics = False
                # The root spans the whole observed window of its children
                root_children = root_node.children
                if root_children:
                    root_node.entry_time = min(map(attrgetter('entry_time'), root_children))
                    root_node.exit_time = max(map(attrgetter('exit_time'), root_children))
                else:
                    root_node.entry_time = 0
                    root_node.exit_time = 0
            return (root_node, visit, finalize)
        def build_graph(subdf):
            # Build the call tree for one CPU by replaying its events in order
            root_node, visitor, finalizer = make_visitor()
            subdf.apply(visitor, axis=1)
            finalizer(subdf)
            return root_node
        return cls(
            cpu_nodes = {
                cpu: build_graph(subdf)
                for cpu, subdf in df.groupby('__cpu', observed=True)
            }
        )
class _CallGraphNode(Mapping):
    """
    Represent a function call extracted from some profiling information.

    Implements the :class:`Mapping` protocol over the metrics listed in
    :attr:`_METRICS` (``self_time`` and ``cum_time``).
    """
    # '__weakref__' is listed so instances stay weak-referenceable despite
    # __slots__ suppressing the per-instance __dict__.
    __slots__ = [
        'func_name',
        'cpu',
        'cpu_capacity',
        '_tags',
        '_children',
        'parent',
        'logical_thread',
        'entry_time',
        'exit_time',
        'valid_metrics',
        '__weakref__',
    ]
    _METRICS = sorted((
        'cum_time',
        'self_time',
    ))
    def __init__(self, func_name, parent, logical_thread, cpu, cpu_capacity, entry_time=None, exit_time=None, valid_metrics=True):
        self.func_name = func_name
        self.cpu = cpu
        self.cpu_capacity = cpu_capacity
        self.parent = parent
        self.logical_thread = logical_thread
        self._children = []
        self._tags = {}
        self.entry_time = entry_time
        self.exit_time = exit_time
        # When False, metrics are reported as NaN (e.g. mismatched
        # entry/exit events or missing exit in the trace)
        self.valid_metrics = valid_metrics
    # Identity-based hashing/equality so nodes can live in sets/dicts
    # without comparing their (mutable) contents.
    def __hash__(self):
        return id(self)
    def __eq__(self, other):
        return self is other
    @property
    @memoized
    def _expanded_children(self):
        """
        Children with recursive calls inlined: any descendant that is part of
        a recursion chain on ``self.func_name`` is replaced by its own
        (expanded) children, effectively reparenting them onto the caller.
        """
        def visit(node):
            children = node._children
            children_visit = map(visit, children)
            # Split the (is_recursive, expansion) pairs returned by visit()
            is_recursive, children_expansion = unzip_into(2, children_visit)
            # Check if we are part of any recursive chain
            is_recursive = any(is_recursive) or node.func_name == self.func_name
            # If we are part of a recursion chain, expand all of our children
            # so that they are reparented into our caller
            if is_recursive:
                expansion = list(chain.from_iterable(children_expansion))
            else:
                expansion = [node]
            return (is_recursive, expansion)
        return visit(self)[1]
    @property
    @memoized
    def children(self):
        """Expanded children excluding the ones that preempted this call."""
        return [
            child
            for child in self._expanded_children
            if not self._is_preempted_by(child)
        ]
    @property
    def _preempting_children(self):
        """Expanded children that preempted this call (other logical thread)."""
        return {
            child
            for child in self._expanded_children
            if self._is_preempted_by(child)
        }
    def _is_preempted_by(self, node):
        # A child on a different logical thread (e.g. an ISR root) is a
        # preemption, not a real callee
        return self.logical_thread != node.logical_thread
    def _str(self, idt):
        # Recursive pretty-printer; idt is the indentation level
        idt_str = idt * ' '
        if self.children:
            children = ':\n' + '\n'.join(child._str(idt + 1) for child in self.children)
        else:
            children = ''
        return f'{idt_str}{self.func_name}, self={self["self_time"]}s cum={self["cum_time"]}s tags={self.tags}{children}'
    def __str__(self):
        return self._str(0)
    @property
    def tagged_name(self):
        """Function name decorated with its tags, for display purposes."""
        return self.format_name(self.func_name, self.tags)
    @staticmethod
    def format_name(func_name, tags):
        """Format ``func_name`` as ``name (tag1=v1|v2, tag2=v3)``."""
        tags = tags or {}
        tags = ', '.join(
            f'{tag}={"|".join(map(str, vals))}'
            for tag, vals in sorted(tags.items())
        )
        tags = f' ({tags})' if tags else ''
        return f'{func_name}{tags}'
    @memoized
    def __getitem__(self, key):
        """
        Compute the given metric (``self_time`` or ``cum_time``) in seconds.

        Returns NaN when the node's metrics are unusable.
        """
        if not self.valid_metrics:
            # NOTE(review): np.NaN was removed in NumPy 2.0 (use np.nan);
            # confirm the pinned NumPy version supports it.
            return np.NaN
        delta = self.exit_time - self.entry_time
        if key == 'self_time':
            return delta - sum(
                node.exit_time - node.entry_time
                # Substract the time spent in all the children, including the
                # ones that preempted us
                for node in self._expanded_children
            )
        elif key == 'cum_time':
            # Define cum_time in terms of self_time, so that preempting
            # children are properly accounted for recurisvely
            return self['self_time'] + sum(
                node['cum_time']
                for node in self.children
            )
        else:
            raise KeyError(f'Unknown metric "{key}"')
    def __iter__(self):
        return iter(self._METRICS)
    def __len__(self):
        return len(self._METRICS)
    @property
    def _inherited_tags(self):
        """Tags merged from all parents and all (indirect) children."""
        def merge_tags(tags1, tags2):
            # Union the value sets of common tags, keep the rest as-is
            common_keys = tags1.keys() & tags2.keys()
            new = {
                tag: tags1[tag] | tags2[tag]
                for tag in common_keys
            }
            for tags in (tags1, tags2):
                new.update({
                    tag: tags[tag]
                    for tag in tags.keys() - common_keys
                })
            return new
        # Since merge_tags() is commutative (merge_tags(a, b) == merge_tags(b,
        # a)), we don't need any specific ordering on the parents
        nodes = chain(self.parents, self.indirect_children)
        tags = reduce(merge_tags, map(attrgetter('_tags'), nodes), {})
        return tags
    @property
    @memoized
    def tags(self):
        """
        Final tags of this node: inherited tags overridden by its own tags,
        with value sets frozen so the result is hashable.
        """
        return dict(
            (key, frozenset(vals))
            for key, vals in {
                **self._inherited_tags,
                **self._tags,
            }.items()
        )
    @property
    def parents(self):
        """Generator over all ancestors, nearest first."""
        parent = self.parent
        if parent is not None:
            yield parent
            yield from parent.parents
    @property
    def indirect_children(self):
        """Generator over all descendants, depth-first."""
        for child in self.children:
            yield child
            yield from child.indirect_children
    def set_tags(self, tags):
        """Record the given ``{tag: value}`` pairs on this node."""
        for tag, val in tags.items():
            self._tags.setdefault(tag, set()).add(val)
class JSONStatsFunctionsAnalysis(AnalysisHelpers):
    """
    Support for kernel functions profiling and analysis

    :param stats_path: Path to JSON function stats as returned by devlib
        :meth:`devlib.collector.ftrace.FtraceCollector.get_stats`
    :type stats_path: str
    """
    name = 'functions_json'
    def __init__(self, stats_path):
        self.stats_path = stats_path
        # Opening functions profiling JSON data file
        with open(self.stats_path) as f:
            stats = json.load(f)
        # Build one DataFrame of function stats per CPU, then concatenate
        # them with the CPU number as the outer index level
        frames = {
            int(cpu): pd.DataFrame.from_dict(data, orient='index')
            for cpu, data in stats.items()
        }
        # Build and keep track of the DataFrame
        self._df = pd.concat(list(frames.values()),
                             keys=list(frames.keys()))
    def get_default_plot_path(self, **kwargs):
        # Plots default to living next to the JSON stats file
        return super().get_default_plot_path(
            default_dir=os.path.dirname(self.stats_path),
            **kwargs,
        )
    def df_functions_stats(self, functions=None):
        """
        Get a DataFrame of specified kernel functions profile data

        For each profiled function a DataFrame is returned which reports stats
        on kernel functions execution time. The reported stats are per-CPU and
        include: number of times the function has been executed (hits),
        average execution time (avg), overall execution time (time) and samples
        variance (s_2).
        By default returns a DataFrame of all the functions profiled.

        :param functions: the name of the function or a list of function names
            to report
        :type functions: str or list(str)
        """
        df = self._df
        if functions:
            # A lone function name would otherwise be iterated
            # character-by-character by isin()
            if isinstance(functions, str):
                functions = [functions]
            return df.loc[df.index.get_level_values(1).isin(functions)]
        else:
            return df
    @AnalysisHelpers.plot_method()
    def plot_profiling_stats(self, functions: str=None, axis=None, local_fig=None, metrics: str='avg'):
        """
        Plot functions profiling metrics for the specified kernel functions.

        For each specified metric a barplot is generated which reports the
        value of the metric when the kernel function has been executed on each
        CPU.
        By default all the kernel functions are plotted.

        :param functions: the name of list of name of kernel functions to plot
        :type functions: str or list(str)

        :param metrics: the metrics to plot
            avg - average execution time
            time - total execution time
        :type metrics: str or list(str)
        """
        df = self.df_functions_stats(functions)
        # A lone metric name such as the default 'avg' would otherwise be
        # treated as the character set {'a', 'v', 'g'}
        if isinstance(metrics, str):
            metrics = [metrics]
        metrics = [metric.casefold() for metric in metrics]
        # Check that all the required metrics are actually available
        available_metrics = df.columns.tolist()
        missing = set(metrics) - set(available_metrics)
        if missing:
            raise ValueError(f'Metrics {missing} not supported, available metrics are {available_metrics}')
        # Title and Y label for each metric we know how to plot
        plot_info = {
            'avg': ('Average Completion Time per CPUs', 'Completion Time [us]'),
            'time': ('Total Execution Time per CPUs', 'Execution Time [us]'),
        }
        for metric in metrics:
            try:
                title, ylabel = plot_info[metric]
            except KeyError:
                # Previously an unknown metric either raised NameError or
                # silently reused the previous metric's title
                raise ValueError(f'Metric "{metric}" cannot be plotted, choose from {sorted(plot_info)}')
            data = df[metric].unstack()
            data.plot(kind='bar',
                      ax=axis, figsize=(16, 8), legend=True,
                      title=title, table=True)
            axis.set_ylabel(ylabel)
            axis.get_xaxis().set_visible(False)
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
| credp/lisa | lisa/analysis/functions.py | Python | apache-2.0 | 32,267 | [
"VisIt"
] | 37ca2d096019a9aa1d7776b4990420996766ca5adc95f15c6dad72d5de9bb64f |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para unsoloclic
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re
import urlparse
from core import config
from core import logger
from core import scrapertools
from core.item import Item
DEBUG = config.get_setting("debug")
def mainlist(item):
    """Channel entry point: seed the item with the home URL and list it."""
    logger.info("[unsoloclic.py] mainlist")
    item.url = "http://unsoloclic.info"
    return novedades(item)
def novedades(item):
    """
    Scrape the "latest movies" listing page: returns one Item per movie post,
    plus a "next page" Item when a pagination link is present.
    """
    logger.info("[unsoloclic.py] novedades")
    itemlist = []
    # Download the page
    data = scrapertools.cachePage(item.url)
    '''
    <div class="post-45732 post type-post status-publish format-standard hentry category-2012 category-blu-ray category-mkv-hd720p" id="post-45732">
    <h2 class="title"><a href="http://unsoloclic.info/2012/11/ek-tha-tiger-2012-blu-ray-720p-hd/" rel="bookmark" title="Permanent Link to Pelicula Ek Tha Tiger (2012) BLU-RAY 720p HD">Pelicula Ek Tha Tiger (2012) BLU-RAY 720p HD</a></h2>
    <div class="postdate"><img src="http://unsoloclic.info/wp-content/themes/TinyWeb/images/date.png" /> noviembre 5th, 2012
    <!--
    <img src="http://unsoloclic.info/wp-content/themes/TinyWeb/images/user.png" /> unsoloclic
    -->
    </div>
    <div class="entry">
    <p><a href="http://unsoloclic.info/2012/11/ek-tha-tiger-2012-blu-ray-720p-hd/" rel="attachment wp-att-45737"><img src="http://unsoloclic.info/wp-content/uploads/2012/11/Ek-Tha-Tiger-2012.jpg" alt="" title="Ek Tha Tiger (2012)" width="500" height="629" class="aligncenter size-full wp-image-45737" /></a></p>
    <h2 style="text-align: center;"></h2>
    <div class="readmorecontent">
    <a class="readmore" href="http://unsoloclic.info/2012/11/ek-tha-tiger-2012-blu-ray-720p-hd/" rel="bookmark" title="Permanent Link to Pelicula Ek Tha Tiger (2012) BLU-RAY 720p HD">Seguir Leyendo</a>
    </div>
    </div>
    </div><!--/post-45732-->
    '''
    '''
    <div class="post-45923 post type-post status-publish format-standard hentry category-2012 category-blu-ray category-comedia category-drama category-mkv category-mkv-hd720p category-romance tag-chris-messina tag-jenna-fischer tag-lee-kirk tag-the-giant-mechanical-man-pelicula tag-topher-grace" id="post-45923">
    <h2 class="title"><a href="http://unsoloclic.info/2012/12/the-giant-mechanical-man-2012-bluray-720p-hd/" rel="bookmark" title="Permanent Link to The Giant Mechanical Man (2012) BluRay 720p HD">The Giant Mechanical Man (2012) BluRay 720p HD</a></h2>
    <div class="postdate"><img src="http://unsoloclic.info/wp-content/themes/TinyWeb/images/date.png" /> diciembre 24th, 2012
    <!--
    <img src="http://unsoloclic.info/wp-content/themes/TinyWeb/images/user.png" /> deportv
    -->
    </div>
    <div class="entry">
    <p style="text-align: center;"><a href="http://unsoloclic.info/2012/12/the-giant-mechanical-man-2012-bluray-720p-hd/"><img class="aligncenter size-full wp-image-45924" title="Giant Michanical Man Pelicula Descargar" src="http://unsoloclic.info/wp-content/uploads/2012/12/Giant-Michanical-Man-Pelicula-Descargar.jpg" alt="" width="380" height="500" /></a></p>
    <p style="text-align: center;">
    <div class="readmorecontent">
    <a class="readmore" href="http://unsoloclic.info/2012/12/the-giant-mechanical-man-2012-bluray-720p-hd/" rel="bookmark" title="Permanent Link to The Giant Mechanical Man (2012) BluRay 720p HD">Seguir Leyendo</a>
    </div>
    </div>
    </div><!--/post-45923-->
    '''
    # One capture group triple per movie post: (url, title, thumbnail)
    patron = '<div class="post[^"]+" id="post-\d+">[^<]+'
    patron += '<h2 class="title"><a href="([^"]+)" rel="bookmark" title="[^"]+">([^<]+)</a></h2>[^<]+'
    patron += '<div class="postdate">.*?</div>[^<]+'
    patron += '<div class="entry">[^<]+'
    patron += '<p[^<]+<a[^<]+<img.*?src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        scrapedplot = ""
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    '''
    <a href="http://unsoloclic.info/page/2/" >« Peliculas anteriores</a>
    '''
    # Pagination: link to the previous-movies page ("Peliculas anteriores")
    patron = '<a href="([^"]+)" >\«\; Peliculas anteriores</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)
    for match in matches:
        scrapedtitle = ">> Página siguiente"
        scrapedplot = ""
        scrapedurl = urlparse.urljoin(item.url,match)
        scrapedthumbnail = ""
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append( Item(channel=item.channel, action="novedades", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist
def findvideos(item):
    """
    Scrape a movie page for video links: explicit linkbucks banner links
    first, then any servers detected by servertools in the raw page.
    """
    logger.info("[unsoloclic.py] findvideos")
    data = scrapertools.cache_page(item.url)
    itemlist=[]
    #<a href="http://67cfb0db.linkbucks.com"><img title="billionuploads" src="http://unsoloclic.info/wp-content/uploads/2012/11/billonuploads2.png" alt="" width="380" height="50" /></a></p>
    #<a href="http://1bd02d49.linkbucks.com"><img class="colorbox-57103" title="Freakeshare" alt="" src="http://unsoloclic.info/wp-content/uploads/2013/01/freakshare.png" width="390" height="55" /></a></p>
    # Capture (linkbucks url, hoster name from img title, hoster thumbnail)
    patron = '<a href="(http.//[a-z0-9]+.linkbucks.c[^"]+)[^>]+><img.*?title="([^"]+)".*?src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for url,servertag,serverthumb in matches:
        itemlist.append( Item(channel=item.channel, action="play", server="linkbucks", title=servertag+" [linkbucks]" , url=url , thumbnail=serverthumb , plot=item.plot , folder=False) )
    # Let the generic server detector pick up any other embedded video links
    from core import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    # Normalize the auto-detected items (the linkbucks ones are already set up)
    for videoitem in itemlist:
        if videoitem.server!="linkbucks":
            videoitem.channel=item.channel
            videoitem.action="play"
            videoitem.folder=False
            videoitem.title = "["+videoitem.server+"]"
    return itemlist
def play(item):
    """
    Resolve the final playable item. linkbucks (and nested adf.ly) URL
    shorteners are unwrapped first; other servers are played as-is.
    """
    logger.info("[unsoloclic.py] play")
    itemlist=[]
    if item.server=="linkbucks":
        logger.info("Es linkbucks")
        # Work out the real link behind the linkbucks shortener
        from servers.decrypters import linkbucks
        location = linkbucks.get_long_url(item.url)
        logger.info("location="+location)
        # Extract the skip-ad URL when the target is an adf.ly interstitial
        if location.startswith("http://adf"):
            # Work out the real link behind adf.ly
            from servers.decrypters import adfly
            location = adfly.get_long_url(location)
            logger.info("location="+location)
        from core import servertools
        itemlist=servertools.find_video_items(data=location)
        for videoitem in itemlist:
            videoitem.channel=item.channel
            videoitem.folder=False
    else:
        itemlist.append(item)
    return itemlist
| Hernanarce/pelisalacarta | python/main-classic/channels/unsoloclic.py | Python | gpl-3.0 | 7,369 | [
"ADF"
] | b131d864cebdc0529d22896931144ed7a2817ba1d25b24c6d1d7efd93071e666 |
# Copyright (c) 2014 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this list
# of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Fan, Yugang <yugang.fan@intel.com>
import time
import json
import re
import colorsys
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import (
NoSuchElementException,
StaleElementReferenceException,
NoAlertPresentException,
WebDriverException)
from atip.tizen import tizen
from atip.common import common
try:
from urlparse import urljoin, urlparse
except ImportError:
from urllib.parse import urljoin, urlparse
class WebAPP(common.APP):
    """
    Selenium-backed web application wrapper (Python 2).

    Holds a remote WebDriver session and exposes element lookup, clicking,
    text checking and alert helpers used by the test steps. Lookup helpers
    return the matching element or None; action helpers return True/False.
    """
    def __init__(self, app_config=None, app_name=None):
        self.driver = None
        self.app_type = common.APP_TYPE_WEB
        self.app_name = app_name
        self.app_id = ""
        # Scratch storage for save_content()/compare_two_values()
        self.text_value = {}
        apk_activity_name = ""
        apk_pkg_name = ""
        if "platform" in app_config and "name" in app_config["platform"]:
            if app_config["platform"]["name"].upper().find('TIZEN') >= 0:
                # Resolve the Tizen application ID from its name
                self.app_id = tizen.get_appid_by_name(
                    self.app_name, app_config["platform"], app_config["tizen_user"])
            if app_config["platform"]["name"].upper().find('ANDROID') >= 0:
                # Derive the Crosswalk APK activity/package names from the
                # app name (e.g. "foo-bar" -> ".FooBarActivity", "org.xwalk.foo_bar")
                self.app_name = self.app_name.replace("-", "_")
                apk_name_update = "".join(
                    [i.capitalize() for i in self.app_name.split("_") if i])
                apk_activity_name = ".%sActivity" % apk_name_update
                apk_pkg_name = "org.xwalk.%s" % self.app_name
        # Substitute the placeholders in the config via a JSON round-trip,
        # which also gives us a deep copy of app_config
        app_config_str = json.dumps(app_config).replace(
            "TEST_APP_NAME", self.app_name).replace(
            "TEST_APP_ID", self.app_id).replace(
            "TEST_PKG_NAME", apk_pkg_name).replace(
            "TEST_ACTIVITY_NAME", apk_activity_name)
        self.app_config = json.loads(app_config_str)
        if "url-prefix" in app_config:
            self.url_prefix = app_config["url-prefix"]
        else:
            self.url_prefix = ""
    def __get_element_by_xpath(self, xpath, display=True):
        """Return the element matching xpath (visible if display), else None."""
        try:
            element = self.driver.find_element_by_xpath(xpath)
            if display:
                try:
                    if element.is_displayed():
                        return element
                except StaleElementReferenceException:
                    pass
            else:
                return element
            print "Failed to get element"
        except Exception as e:
            print "Failed to get element: %s" % e
        return None
    def __get_element_by_key_attr(self, key, attr, display=True):
        """Return the element whose attribute ``attr`` equals ``key``."""
        xpath = "//*[@%s='%s']" % (attr, key)
        try:
            element = self.driver.find_element_by_xpath(xpath)
            if display:
                try:
                    if element.is_displayed():
                        return element
                except StaleElementReferenceException:
                    pass
            else:
                return element
            print "Failed to get element"
        except Exception as e:
            print "Failed to get element: %s" % e
        return None
    def __get_element_by_tag(self, key, display=True):
        """Return the first element with the given tag name, else None."""
        # NOTE(review): selenium's WebDriver exposes find_element_by_tag_name,
        # not find_element_by_tag -- this likely raises AttributeError (caught
        # below) and always returns None; confirm.
        try:
            element = self.driver.find_element_by_tag(key)
            return element
        except Exception as e:
            print "Failed to get element: %s" % e
        return None
    def __get_element_by_key(self, key, display=True):
        """
        Return the first (visible, if display) element whose id/name/value/
        class/text matches ``key``, else None.
        """
        try:
            for i_element in self.driver.find_elements_by_xpath(str(
                    "//*[@id='%(key)s']|"
                    "//*[@name='%(key)s']|"
                    "//*[@value='%(key)s']|"
                    "//*[contains(@class, '%(key)s')]|"
                    "//div[contains(text(), '%(key)s')]|"
                    "//button[contains(text(), '%(key)s')]|"
                    "//input[contains(text(), '%(key)s')]|"
                    "//textarea[contains(text(), '%(key)s')]|"
                    "//a[contains(text(), '%(key)s')]") % {'key': key}):
                if display:
                    try:
                        if i_element.is_displayed():
                            return i_element
                    except StaleElementReferenceException:
                        pass
                else:
                    return i_element
            print "Failed to get element"
        except Exception as e:
            print "Failed to get element: %s" % e
        return None
    def __get_element_by_keys(self, key_p, key_c, display=True):
        """
        Return the first matching child ``key_c`` under the first matching
        parent ``key_p`` (same matching rules as __get_element_by_key).
        """
        try:
            for i_element in self.driver.find_elements_by_xpath(str(
                    "//*[@id='%(key)s']|"
                    "//*[@name='%(key)s']|"
                    "//*[@value='%(key)s']|"
                    "//*[contains(@class, '%(key)s')]|"
                    "//div[contains(text(), '%(key)s')]|"
                    "//button[contains(text(), '%(key)s')]|"
                    "//input[contains(text(), '%(key)s')]|"
                    "//textarea[contains(text(), '%(key)s')]|"
                    "//a[contains(text(), '%(key)s')]") % {'key': key_p}):
                get_element = False
                if display:
                    try:
                        if i_element.is_displayed():
                            get_element = True
                    except StaleElementReferenceException:
                        pass
                else:
                    get_element = True
                if get_element:
                    print "%s ++ %s" % (i_element.get_attribute("id"), i_element.get_attribute("class"))
                    for ii_element in i_element.find_elements_by_xpath(str(
                            "./*[@id='%(key)s']|"
                            "./*[@name='%(key)s']|"
                            "./*[@value='%(key)s']|"
                            "./*[contains(@class, '%(key)s')]|"
                            "./div[contains(text(), '%(key)s')]|"
                            "./button[contains(text(), '%(key)s')]|"
                            "./input[contains(text(), '%(key)s')]|"
                            "./a[contains(text(), '%(key)s')]") % {'key': key_c}):
                        if display:
                            try:
                                if ii_element.is_displayed():
                                    return ii_element
                            except StaleElementReferenceException:
                                pass
                        else:
                            return ii_element
            print "Failed to get element"
        except Exception as e:
            print "Failed to get element: %s" % e
        return None
    def __check_normal_text(self, text, display=True):
        """Return an element whose value or normalized text equals ``text``."""
        try:
            for i_element in self.driver.find_elements_by_xpath(str(
                    '//*[@value="{text}"]|'
                    '//*[contains(normalize-space(.),"{text}") '
                    'and not(./*[contains(normalize-space(.),"{text}")])]'
                    .format(text=text))):
                if display:
                    try:
                        if i_element.is_displayed():
                            return i_element
                    except StaleElementReferenceException:
                        pass
                else:
                    return i_element
        except Exception as e:
            print "Failed to get element: %s" % e
        return None
    def __check_normal_text_element(self, text, key, display=True):
        """Like __check_normal_text but scoped under the element for ``key``."""
        element = self.__get_element_by_key(key, display)
        if element:
            try:
                # NOTE(review): the '//' axis searches the whole document,
                # not just under `element`; confirm the intended scoping.
                for i_element in element.find_elements_by_xpath(str(
                        '//*[@value="{text}"]|'
                        '//*[contains(normalize-space(.),"{text}") '
                        'and not(./*[contains(normalize-space(.),"{text}")])]'
                        .format(text=text))):
                    if display:
                        try:
                            if i_element.is_displayed():
                                return i_element
                        except StaleElementReferenceException:
                            pass
                    else:
                        return i_element
            except Exception as e:
                print "Failed to get element: %s" % e
        return None
    def compare_two_values(self, first=None, second=None):
        """Return True when the saved value ``first`` < ``second``."""
        # NOTE(review): eval() on text previously scraped from the page --
        # unsafe if the page content is untrusted.
        try:
            if eval(self.text_value[first]) < eval(self.text_value[second]):
                return True
            else:
                return False
        except Exception as e:
            print "Failed to compare these two param: %s" % e
            return False
    def save_content(self, p_name=None, key=None):
        """Store the innerHTML of element ``key`` under name ``p_name``."""
        try:
            js_script = 'var style=document.getElementById(\"' + key + '\").innerHTML; return style'
            style = self.driver.execute_script(js_script)
            self.text_value[p_name] = style
            return True
        except Exception as e:
            print "Failed to get element: %s" % e
            return False
    def launch_app(self):
        """Start the remote WebDriver session described by app_config."""
        try:
            desired_capabilities = self.app_config["desired-capabilities"]
            self.driver = WebDriver(
                str(self.app_config["driver-url"]), desired_capabilities)
        except Exception as e:
            print "Failed to launch %s: %s" % (self.app_name, e)
            return False
        return True
    def switch_url(self, url, with_prefix=True):
        """Navigate to ``url``, optionally joined onto the configured prefix."""
        if with_prefix:
            url = urljoin(self.url_prefix, url)
        try:
            self.driver.get(url)
        except Exception as e:
            print "Failed to visit %s: %s" % (url, e)
            return False
        return True
    def title(self):
        """Return the current page title, or None on failure."""
        try:
            return self.driver.title
        except Exception as e:
            print "Failed to get title: %s" % e
        return None
    def current_url(self):
        """Return the current page URL, or None on failure."""
        try:
            return self.driver.current_url
        except Exception as e:
            print "Failed to get current url: %s" % e
        return None
    def reload(self):
        """Refresh the current page."""
        self.driver.refresh()
        return True
    def back(self):
        """Navigate one step back in history."""
        self.driver.back()
        return True
    def forward(self):
        """Navigate one step forward in history."""
        self.driver.forward()
        return True
    def check_normal_text_timeout(self, text=None, display=True, timeout=2):
        """Poll until ``text`` appears on the page or ``timeout`` seconds pass."""
        end_time = time.time() + timeout
        while time.time() < end_time:
            if self.__check_normal_text(text, display):
                return True
            time.sleep(0.2)
        return False
    def check_normal_text_element_timeout(
            self, text=None, key=None, display=True, timeout=2):
        """Poll until ``text`` appears under element ``key`` or timeout."""
        end_time = time.time() + timeout
        while time.time() < end_time:
            if self.__check_normal_text_element(text, key, display):
                return True
            time.sleep(0.2)
        return False
    def check_normal_text_element_timeout_with_color(
            self, text=None, key=None, color=None, display=True, timeout=2):
        """Poll until ``text`` appears under ``key`` AND its style has ``color``."""
        end_time = time.time() + timeout
        while time.time() < end_time:
            if self.__check_normal_text_element(text, key, display):
                if self.get_element_color(key, color):
                    return True
            time.sleep(0.2)
        return False
    def get_element_color(self, key=None, color=None, display=True):
        """Return True when ``color`` occurs in the style attribute of ``key``."""
        try:
            js_script = 'var style=document.getElementById(\"' + key + '\").getAttribute(\"style\"); return style'
            style = self.driver.execute_script(js_script)
            if style.find(color) != -1:
                return True
        except Exception as e:
            print "Failed to get element: %s" % e
        return False
    def check_content_type(self, key=None, display=True):
        """
        Classify the innerText of element ``key`` as 'none', 'int', 'float',
        'boolean' or 'string'. Returns None (implicitly) on failure.
        """
        try:
            js_script = 'var text=document.getElementById(\"' + key + '\").innerText; return text'
            text = self.driver.execute_script(js_script)
            if text.strip() == '':
                return 'none'
            # Leading numeric pattern => int/float; otherwise boolean/string
            number = re.match(r'(-?\d+)(\.\d+)?',text)
            if number:
                if "." in text:
                    return "float"
                else:
                    return "int"
            else:
                if text.upper() == "TRUE" or text.upper() == "FALSE":
                    return "boolean"
                else:
                    return "string"
        except Exception as e:
            print "Failed to get element text: %s" % e
    def press_element_by_key(self, key, display=True):
        """Click (element.click) the element matching ``key``."""
        element = self.__get_element_by_key(key, display)
        # NOTE(review): get_attribute is called before the None check below,
        # so a missing element raises AttributeError instead of returning False.
        print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
        if element:
            element.click()
            return True
        return False
    def press_element_by_keys(self, key_p, key_c, display=True):
        """Click the child ``key_c`` under parent ``key_p``."""
        element = self.__get_element_by_keys(key_p, key_c, display)
        # NOTE(review): same pre-None-check get_attribute issue as above
        print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
        if element:
            element.click()
            return True
        return False
    def press_element_by_key_attr(self, key, attr, display=True):
        """Click the element whose attribute ``attr`` equals ``key``."""
        element = self.__get_element_by_key_attr(key, attr, display)
        # NOTE(review): same pre-None-check get_attribute issue as above
        print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
        if element:
            element.click()
            return True
        return False
    def click_element_by_keys(self, key_p, key_c, display=True):
        """Click via ActionChains the child ``key_c`` under parent ``key_p``."""
        element = self.__get_element_by_keys(key_p, key_c, display)
        # NOTE(review): same pre-None-check get_attribute issue as above
        print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
        if element:
            ActionChains(self.driver).click(element).perform()
            return True
        return False
    def click_element_by_key(self, key, display=True):
        """Click via ActionChains the element matching ``key``."""
        element = self.__get_element_by_key(key, display)
        # NOTE(review): same pre-None-check get_attribute issue as above
        print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
        if element:
            ActionChains(self.driver).click(element).perform()
            return True
        return False
    def click_element_coords(self, x, y, key, display=True):
        """Click at offset (x, y) within the element matching ``key``."""
        element = self.__get_element_by_key(key, display)
        if element:
            ActionChains(self.driver).move_to_element_with_offset(
                element, x, y).click().perform()
            return True
        return False
    def fill_element_by_key(self, key, text, display=True):
        """Type ``text`` into the element matching ``key``."""
        element = self.__get_element_by_key(key, display)
        if element:
            element.send_keys(text)
            return True
        return False
    def check_checkbox_by_key(self, key, display=True):
        """Ensure the checkbox with id/name ``key`` is checked."""
        element = self.__get_element_by_xpath(str(
            "//input[@id='%(key)s'][@type='checkbox']|"
            "//input[@name='%(key)s'][@type='checkbox']") % {'key': key}, display)
        if element:
            if not element.is_selected():
                element.click()
            return True
        return False
    def uncheck_checkbox_by_key(self, key, display=True):
        """Ensure the checkbox with id/name ``key`` is unchecked."""
        element = self.__get_element_by_xpath(str(
            "//input[@id='%(key)s'][@type='checkbox']|"
            "//input[@name='%(key)s'][@type='checkbox']") % {'key': key}, display)
        if element:
            if element.is_selected():
                element.click()
            return True
        return False
    def get_alert_text(self):
        """Return the text of the current alert, or None when absent."""
        # NOTE(review): switch_to_alert() is deprecated in selenium in favour
        # of driver.switch_to.alert; confirm the pinned selenium version.
        try:
            alert_element = self.driver.switch_to_alert()
            if alert_element:
                return alert_element.text
        except Exception as e:
            print "Failed to get alert text: %s" % e
        return None
    def check_alert_existing(self):
        """Return True when an alert is currently displayed."""
        try:
            self.driver.switch_to_alert().text
        except NoAlertPresentException:
            return False
        return True
    def accept_alert(self):
        """Accept (OK) the current alert."""
        try:
            alert_element = self.driver.switch_to_alert()
            alert_element.accept()
            return True
        except Exception as e:
            print "Failed to accept alert: %s" % e
            return False
    def quit(self):
        """Tear down the WebDriver session if one is active."""
        if self.driver:
            self.driver.quit()
def launch_webapp_by_name(context, app_name):
    """(Re)launch the web app named `app_name` and make it the active app.

    context: behave-style context carrying `web_config` and the `apps`
        registry of running WebAPP instances.
    Raises AssertionError when no web configuration is available or the
    app fails to launch (same failure type as the original bare
    `assert False`, now with messages). The trailing no-op `assert True`
    of the original was removed.
    """
    assert context.web_config, "No web configuration available"
    # Tear down any previous instance of the same app before relaunching.
    if app_name in context.apps:
        context.apps[app_name].quit()
    context.apps.update({app_name: WebAPP(context.web_config, app_name)})
    context.app = context.apps[app_name]
    assert context.app.launch_app(), "Failed to launch app '%s'" % app_name
| YongseopKim/crosswalk-test-suite | tools/atip/atip/web/web.py | Python | bsd-3-clause | 18,181 | [
"VisIt"
] | 36df8a608d6f60af79c2c4c69a2158ff029afbad6ad2c7de53e6bb27d388766f |
import numpy as np
from def_get_mags import get_zdistmod, get_kcorrect2, aper_and_comov, abs2lum, lumdensity, abs_mag
import math
from defclump import meanlum2, medlum2, get_error
import matplotlib.pyplot as plt
from def_mymath import halflight
def get_ind_lums(newdata, bands, aperture, scale=''):
    """Compute per-galaxy aperture luminosities, radii and luminosity densities.

    newdata:  catalog rows with aperture magnitudes and a redshift column
              ('Z', or 'Z_2' after a table join).
    bands:    photometric band list forwarded to the k-correction helper.
    aperture: aperture radii to evaluate (forwarded to aper_and_comov).
    scale:    'log' to return log10 quantities, anything else for linear.

    Returns (luminosities, radii, densities) as numpy arrays indexed
    [galaxy, aperture].

    All helpers used here are imported at module level; the duplicated
    function-local imports of the original (plus unused ones such as
    scipy.interpolate and the plotting helpers) were removed, along with
    the unused locals LI2 and string.
    """
    Naps = len(aperture)
    Ndat = len(newdata)
    # Some catalogs label redshift 'Z', others 'Z_2' -- try both.
    try:
        redshifts = newdata['Z']
        DM = get_zdistmod(newdata, 'Z')
    except:
        redshifts = newdata['Z_2']
        DM = get_zdistmod(newdata, 'Z_2')
    kcorrect = get_kcorrect2(newdata, 'mag_forced_cmodel', '_err', bands, '',
                             'hsc_filters.dat', redshifts)
    fig = plt.figure()  # kept: the original created this figure as a side effect
    bigLI = []
    bigrad = []
    bigden = []
    for n in range(0, Ndat):
        LI = []
        lumdi = []
        radkpc = aper_and_comov(aperture, redshifts[n])
        print('redshifts is ', redshifts[n])
        for a in range(0, Naps):  # one pass per aperture
            ns = str(a)
            print('aperture0', ns)
            absg, absr, absi, absz, absy = abs_mag(newdata[n], 'mag_aperture0',
                                                   kcorrect, DM[n], bands, ns, n)
            Lumg, Lumr, Lumi, Lumz, Lumy = abs2lum(absg, absr, absi, absz, absy)
            Lg, Lr, Li, Lz, Ly = lumdensity(Lumg, Lumr, Lumi, Lumz, Lumy, radkpc[a])
            if scale == 'log':
                print('getting logs')
                LI.append(math.log10(Lumi))
                lumdi.append(math.log10(Li))
            else:
                LI.append(Lumi)
                lumdi.append(Li)
        print('LI for ', n, ' galaxy is ', LI)
        bigLI.append(LI)
        bigden.append(lumdi)
        if scale == 'log':
            # Distinct index name: the original reused the outer loop
            # variable `n` here, shadowing the galaxy index.
            lograd = [math.log10(radkpc[j]) for j in range(len(radkpc))]
            bigrad.append(lograd)
        else:
            bigrad.append(radkpc)
    return np.array(bigLI), np.array(bigrad), np.array(bigden)
def get_avg_lums(bigLIs, bigrads, lumdensi, type=''):
    """Stack individual profiles into an averaged profile.

    bigLIs/bigrads/lumdensi: per-galaxy luminosity, radius and
        luminosity-density arrays (as returned by get_ind_lums).
    type: 'mean' or 'median'; for any other value the function returns
        None, matching the original fall-through behavior.

    Returns (avg_lum, avg_density, binned_radii, density_error).

    NOTE: the original called .flatten() on all three inputs and discarded
    the results (numpy flatten returns a copy, it does not mutate); those
    no-op lines were removed.
    """
    Naps = 0.0
    err = 'bootstrap_stdv'  # error estimator requested from get_error
    if type == 'mean':
        meanlum, radavg, bb = meanlum2(bigLIs, bigrads, Naps, scale='log')
        meandens, radavg, bb = meanlum2(lumdensi, bigrads, Naps, scale='log')
        lumdenerr = get_error(lumdensi, bigrads, bb, error=err)
        print('Mean Luminosity= ', meanlum)
        print('Mean LumDensity=', meandens)
        print('Binned Radii= ', radavg)
        print('Standard Deviation= ', lumdenerr)
        return meanlum, meandens, radavg, lumdenerr
    if type == 'median':
        medlum, radavg, bb = medlum2(bigLIs, bigrads)
        medens, radavg, bb = medlum2(lumdensi, bigrads)
        lumdenerr = get_error(lumdensi, bigrads, bb, error=err)
        print('Median Luminosity= ', medlum)
        print('Median LumDensity=', medens)
        print('Binned Radii= ', radavg)
        print('Standard Deviation= ', lumdenerr)
        return medlum, medens, radavg, lumdenerr
def get_halflight(bigLIs, bigrads):
    """Return the half-light radius for each luminosity profile.

    Accepts either a single profile (1-D arrays) or a stack of profiles
    (2-D arrays, one row per galaxy). The actual interpolation is done by
    halflight(radii, luminosities).

    The original also computed log10(max(lum)/2) per galaxy but never used
    it; that dead line was removed.
    """
    print('Getting Halfs')
    if np.ndim(bigrads) == 2:
        print('Array is 2D')
        halfrad = []
        for x in range(len(bigrads)):
            halfrad.append(halflight(bigrads[x], bigLIs[x]))
        return np.array(halfrad)
    return halflight(bigrads, bigLIs)
def get_slopes(lum, hx, x, y, error=None, names=None, smax=False):
    """Fit power-law slopes to profiles beyond the half-light radius.

    lum:   luminosity profiles (unused here -- kept for signature parity
           with get_slopes1).
    hx:    per-galaxy half-light radii (log10) for 2-D input, or a single
           value for a 1-D stacked profile.
    x, y:  radius and luminosity-density arrays; 2-D = one row per galaxy,
           1-D = a single stacked profile.
    error: per-point errors; defaults to ones when omitted.
    smax:  when True, also apply an upper radius cut at mult * r_half.

    Returns (slopes, intercepts, errors) lists for 2-D input, or
    (slope, intercept, xcut, ycut, std_err, errcut) for 1-D input.

    NOTE(review): assumes radii and hx are in log10 units (10**hhx below)
    -- confirm against the callers.
    """
    import scipy.stats as stats
    from def_mymath import my_linregress3
    from my_def_plots import scatter_fit, simple_hist
    mult=4  # upper cut = mult * r_half (linear units)
    Ndim=np.ndim(x)
    N=len(x)
    if error is None:
        print('No error was given')
        error=np.ones((N, len(x[0])))
    if names is None:
        print('No names given')
    if Ndim==2:
        # Per-galaxy fits: collect the radius window for each row first.
        xcut=[]
        ycut=[]
        errcut=[]
        for i in range(len(x)):
            xrow=x[i]
            yrow=y[i]
            errow=error[i]
            hhx=hx[i]
            #merr=errow[xrow>=hhx]
            #mx=xrow[xrow>=hhx]
            #my=yrow[xrow>=hhx]
            if smax== True:
                # Upper cut: log10(mult * 10**r_half).
                hhx10=10**hhx
                hhx2s=mult*hhx10
                hhx2=math.log10(hhx2s)
                bad=0
                #print(hhx2, np.max(xrow))
                if np.max(xrow) >= hhx2:
                    mx=xrow[(xrow>=hhx)&(xrow<=hhx2)]
                    my=yrow[(xrow>=hhx)&(xrow<=hhx2)]
                    merr=errow[(xrow>=hhx)&(xrow<=hhx2)]
                    # Require at least 4 points for a meaningful fit.
                    if len(mx) >=4:
                        xcut.append(mx)
                        ycut.append(my)
                        errcut.append(merr)
                else:
                    print('Upper Cut is Out of the Radius Range')
                    bad += 1
            else:
                merr=errow[xrow>=hhx]
                mx=xrow[xrow>=hhx]
                my=yrow[xrow>=hhx]
                if len(mx) >=4:
                    xcut.append(mx)
                    ycut.append(my)
                    errcut.append(merr)
        slopes=[]
        intercepts=[]
        errs=[]
        for n in range(len(xcut)):
            #print(len(x[n]), len(xcut[n]))
            #slope, int, r_value, p_value, std_err = stats.linregress(xcut[n],ycut[n])
            # NOTE(review): `int` shadows the builtin here; left unchanged.
            slope, int, std_err=my_linregress3(xcut[n], ycut[n], errcut[n])
            slopes.append(slope)
            intercepts.append(int)
            errs.append(std_err)
        return slopes, intercepts, errs
    else: #for arrays of 1D *aka* the stacked profile
        x=np.array(x)
        print('r1/2 limit is ', hx)
        print('xrange for stacked is ', x)
        if error is None:
            error=np.ones(N)
        if smax== True:
            hx10=10**hx
            hx2s=mult*hx10
            hx2=math.log10(hx2s)
            print('upper limit is ', hx2)
            # NOTE(review): when the cut falls outside the radius range,
            # xcut is never assigned and the fit below raises NameError --
            # preserved from the original.
            if np.max(x) <= hx2:
                print('Upper cut is out of the Radius range')
            else:
                xcut=x[(x>=hx)&(x<=hx2)]
                ycut=y[(x>=hx)&(x<=hx2)]
                errcut=error[(x>=hx)&(x<=hx2)]
        else:
            xcut=x[x>=hx]
            ycut=y[x>=hx]
            errcut=error[x>=hx]
        print('Radii are= ', xcut)
        print('R1/2 is= ', hx)
        sl3, C3, std_err3=my_linregress3(xcut, ycut, errcut)
        return sl3, C3, xcut, ycut, std_err3, errcut
def get_slopes1(data, lum, hx, x, y, error=None, names=None, smax=False):
    """Fit power-law slopes to profiles between r_half and a virial radius.

    2-D x/y: one fit per galaxy. With smax=True the upper radius cut is an
    estimated virial radius derived from the galaxy's maximum luminosity
    (M2L * Lmax) and the critical density at its redshift; with smax=False
    only the lower cut at the half-light radius hx[i] applies.
    1-D x/y (a stacked profile): a single fit; with smax=True the upper cut
    is mult * r_half.

    Returns (slopes, intercepts, errors) for 2-D input, or
    (slope, intercept, xcut, ycut, std_err, errcut) for 1-D input.

    Fixes relative to the original:
      * the virial-radius lambda referenced the outer variable `rho`
        instead of its own parameter `ro`;
      * a leftover `hi = hi` debug line (guaranteed NameError) after the
        2-D loop was removed;
      * the default-error array is now built with the right shape for 1-D
        input (the original indexed x[0] and crashed on a 1-D float array).
    Unused function-local imports (scipy.stats, plotting helpers) removed.
    """
    from def_mymath import my_linregress3
    # R_vir from M = 4/3 * pi * ro * delta_c * R^3.
    get_vir_r = lambda M, ro, delta_c: (M / (4. / 3. * math.pi * ro * delta_c)) ** (1. / 3.)
    mult = 6  # stacked-profile upper cut = mult * r_half
    Ndim = np.ndim(x)
    N = len(x)
    if error is None:
        print('Is this going through?')
        error = np.ones((N, len(x[0]))) if Ndim == 2 else np.ones(N)
    if names is None:
        print('No names given')
    if Ndim == 2:
        xcut = []
        ycut = []
        errcut = []
        bad = 0
        good = 0
        for i in range(len(x)):
            xrow = x[i]
            yrow = y[i]
            Lum = lum[i]
            errow = error[i]
            hhx = hx[i]
            if smax is True:
                from astropy.cosmology import FlatLambdaCDM
                from astropy import units as u
                cosmo = FlatLambdaCDM(H0=100, Om0=0.3)
                redshift = data['Z']
                rhos = cosmo.critical_density(redshift[i])  # g/cm^3
                solMkpc3 = u.solMass / u.kpc ** 3
                rho = rhos.to(solMkpc3)
                print('Critical Density= ', rho)
                rho = rho.value
                M2L = 10  # assumed mass-to-light ratio -- TODO confirm
                Lmax = np.max(Lum)
                Mvir = M2L * 10 ** Lmax
                print('Virial Mass= ', Mvir)
                delta_c = 500
                r_vir = math.log10(get_vir_r(Mvir, rho, delta_c))
                print('Radii= ', str(xrow))
                print('Virial Radius is ', str(r_vir))
                if np.max(xrow) >= r_vir:
                    sel = (xrow >= hhx) & (xrow <= r_vir)
                    mx = xrow[sel]
                    my = yrow[sel]
                    merr = errow[sel]
                    # Require at least 4 points for a meaningful fit.
                    if len(mx) >= 4:
                        xcut.append(mx)
                        ycut.append(my)
                        errcut.append(merr)
                        good += 1
                    else:
                        print('Not enough data')
                else:
                    print('Upper Cut is Out of the Virial Radius Range')
                    bad += 1
            elif smax is False:
                sel = xrow >= hhx
                mx = xrow[sel]
                my = yrow[sel]
                merr = errow[sel]
                if len(mx) >= 4:
                    xcut.append(mx)
                    ycut.append(my)
                    errcut.append(merr)
                else:
                    print('Not enough data')
        print('Number of good profiles: ', good)
        print('Number of bad profiles: ', bad)
        print(len(x), len(xcut))
        slopes = []
        intercepts = []
        errs = []
        for n in range(len(xcut)):
            slope, intercept, std_err = my_linregress3(xcut[n], ycut[n], errcut[n])
            slopes.append(slope)
            intercepts.append(intercept)
            errs.append(std_err)
        return slopes, intercepts, errs
    else:
        x = np.array(x)
        if smax is True:
            hx2 = math.log10(mult * 10 ** hx)
            # NOTE(review): when the cut is outside the radius range, xcut
            # is never assigned and the fit below raises NameError --
            # preserved from the original.
            if np.max(x) <= hx2:
                print('Upper cut is out of the Radius range')
            else:
                sel = (x >= hx) & (x <= hx2)
                xcut = x[sel]
                ycut = y[sel]
                errcut = error[sel]
        else:
            xcut = x[x >= hx]
            ycut = y[x >= hx]
            errcut = error[x >= hx]
        sl3, C3, std_err3 = my_linregress3(xcut, ycut, errcut)
        return sl3, C3, xcut, ycut, std_err3, errcut
| anewmark/galaxy_dark_matter | def_clean.py | Python | mit | 8,425 | [
"Galaxy"
] | fd220559b97dad1fbc48ddac8d0455eea8ac52d0bf7ca37a73fbf6e22dcc22d9 |
#!/usr/bin/env python
#Dan Blankenberg
#%prog bounding_region_file mask_intervals_file intervals_to_mimic_file out_file mask_chr mask_start mask_end interval_chr interval_start interval_end interval_strand use_mask allow_strand_overlaps
import sys, random
from copy import deepcopy
from galaxy import eggs
import pkg_resources
pkg_resources.require( "bx-python" )
import bx.intervals.io
import bx.intervals.intersection
import psyco_full
assert sys.version_info[:2] >= ( 2, 4 )
max_iters = 5
def stop_err( msg ):
    """Report a fatal error message on stderr, then terminate the process."""
    stream = sys.stderr
    stream.write( msg )
    sys.exit()
#Try to add a random region
#Try to add a random region
def add_random_region( mimic_region, bound, exist_regions, plus_mask, minus_mask, overlaps ):
    """Try to place one random interval of the mimicked length inside `bound`.

    mimic_region:  (length, strand) tuple describing the interval to mimic.
    bound:         bounding GenomicInterval the random interval must fall in.
    exist_regions: list of intervals already placed; appended to on success.
    plus_mask/minus_mask: Region occupancy masks for each strand.
    overlaps:      "strand" (overlaps allowed across strands), "all"
                   (overlaps allowed everywhere), or anything else for no
                   overlaps at all.

    Returns (exist_regions, added, plus_mask, minus_mask); `added` is True
    when a region was successfully placed, and the returned masks reflect
    the overlap policy.
    """
    region_length, region_strand = mimic_region
    plus_count = plus_mask.count_range()
    minus_count = minus_mask.count_range()
    # Candidate gaps (free runs long enough) on the relevant strand.
    gaps = []
    if region_strand == "-":
        gaps = minus_mask.get_gaps( region_length )
    else:
        gaps = plus_mask.get_gaps( region_length )
    while True:
        # Pick gaps in random order; randint raises once the list is empty.
        try:
            gap_length, gap_start, gap_end = gaps.pop( random.randint( 0, len( gaps ) - 1 ) )
        except:
            break
        try:
            start = random.randint( bound.start + gap_start, bound.start + gap_end - region_length - 1 )
        except ValueError, ve:
            stop_err( "Exception thrown generating random start value: %s" %str( ve ) )
        end = start + region_length
        # Tentatively mark the candidate on a copy of the strand mask; if the
        # covered count grows by exactly region_length, nothing overlapped.
        try_plus_mask = plus_mask.copy()
        try_minus_mask = minus_mask.copy()
        if region_strand == "-":
            try_minus_mask.set_range( start - bound.start, end - bound.start )
        else:
            try_plus_mask.set_range( start - bound.start, end - bound.start )
        rand_region = bx.intervals.io.GenomicInterval( None, [bound.chrom, start, end, region_strand], 0, 1, 2, 3, "+", fix_strand=True )
        if try_plus_mask.count_range() == plus_count + region_length or try_minus_mask.count_range() == minus_count + region_length:
            if overlaps in ["strand", "all"]: #overlaps allowed across strands
                exist_regions.append( rand_region )
                if overlaps == "strand":
                    return exist_regions, True, try_plus_mask, try_minus_mask
                else: #overlaps allowed everywhere
                    return exist_regions, True, plus_mask, minus_mask
            else: #no overlapping anywhere
                exist_regions.append( rand_region )
                # Both strands share the same (updated) mask when overlaps
                # are forbidden entirely.
                if region_strand == "-":
                    return exist_regions, True, try_minus_mask.copy(), try_minus_mask
                else:
                    return exist_regions, True, try_plus_mask, try_plus_mask.copy()
    return exist_regions, False, plus_mask, minus_mask
def main():
includes_strand = False
region_uid = sys.argv[1]
mask_fname = sys.argv[2]
intervals_fname = sys.argv[3]
out_fname = sys.argv[4]
try:
mask_chr = int( sys.argv[5] ) - 1
except:
stop_err( "'%s' is an invalid chrom column for 'Intervals to Mask' dataset, click the pencil icon in the history item to edit column settings." % str( sys.argv[5] ) )
try:
mask_start = int( sys.argv[6] ) - 1
except:
stop_err( "'%s' is an invalid start column for 'Intervals to Mask' dataset, click the pencil icon in the history item to edit column settings." % str( sys.argv[6] ) )
try:
mask_end = int( sys.argv[7] ) - 1
except:
stop_err( "'%s' is an invalid end column for 'Intervals to Mask' dataset, click the pencil icon in the history item to edit column settings." % str( sys.argv[7] ) )
try:
interval_chr = int( sys.argv[8] ) - 1
except:
stop_err( "'%s' is an invalid chrom column for 'File to Mimick' dataset, click the pencil icon in the history item to edit column settings." % str( sys.argv[8] ) )
try:
interval_start = int( sys.argv[9] ) - 1
except:
stop_err( "'%s' is an invalid start column for 'File to Mimick' dataset, click the pencil icon in the history item to edit column settings." % str( sys.argv[9] ) )
try:
interval_end = int( sys.argv[10] ) - 1
except:
stop_err( "'%s' is an invalid end column for 'File to Mimick' dataset, click the pencil icon in the history item to edit column settings." % str( sys.argv[10] ) )
try:
interval_strand = int( sys.argv[11] ) - 1
includes_strand = True
except:
interval_strand = -1
if includes_strand:
use_mask = sys.argv[12]
overlaps = sys.argv[13]
else:
use_mask = sys.argv[11]
overlaps = sys.argv[12]
available_regions = {}
loc_file = "%s/regions.loc" % sys.argv[-1]
for i, line in enumerate( file( loc_file ) ):
line = line.rstrip( '\r\n' )
if line and not line.startswith( '#' ):
fields = line.split( '\t' )
#read each line, if not enough fields, go to next line
try:
build = fields[0]
uid = fields[1]
description = fields[2]
filepath = fields[3]
available_regions[uid] = filepath
except:
continue
if region_uid not in available_regions:
stop_err( "Region '%s' is invalid." % region_uid )
region_fname = available_regions[region_uid].strip()
#set up bounding regions to hold random intervals
bounds = []
for bound in bx.intervals.io.NiceReaderWrapper( open( region_fname, 'r' ), chrom_col=0, start_col=1, end_col=2, fix_strand=True, return_header=False, return_comments=False ):
bounds.append( bound )
#set up length and number of regions to mimic
regions = [ [] for i in range( len( bounds ) ) ]
for region in bx.intervals.io.NiceReaderWrapper( open( intervals_fname, 'r' ), chrom_col=interval_chr, start_col=interval_start, end_col=interval_end, strand_col=interval_strand, fix_strand=True, return_header=False, return_comments=False ):
#loop through bounds, find first proper bounds then add
#if an interval crosses bounds, it will be added to the first bound
for i in range( len( bounds ) ):
if bounds[i].chrom != region.chrom:
continue
intersecter = bx.intervals.intersection.Intersecter()
intersecter.add_interval( bounds[i] )
if len( intersecter.find( region.start, region.end ) ) > 0:
regions[i].append( ( region.end - region.start, region.strand ) ) #add region to proper bound and go to next region
break
for region in regions:
region.sort()
region.reverse()
#read mask file
mask = []
if use_mask != "no_mask":
for region in bx.intervals.io.NiceReaderWrapper( open( mask_fname, 'r' ), chrom_col=mask_chr, start_col=mask_start, end_col=mask_end, fix_strand=True, return_header=False, return_comments=False ):
mask.append( region )
try:
out_file = open ( out_fname, "w" )
except:
stop_err( "Error opening output file '%s'." % out_fname )
i = 0
i_iters = 0
region_count = 0
best_regions = []
num_fail = 0
while i < len( bounds ):
i_iters += 1
#order regions to mimic
regions_to_mimic = regions[i][0:]
if len( regions_to_mimic ) < 1: #if no regions to mimic, skip
i += 1
i_iters = 0
continue
#set up region mask
plus_mask = Region( bounds[i].end - bounds[i].start )
for region in mask:
if region.chrom != bounds[i].chrom: continue
mask_start = region.start - bounds[i].start
mask_end = region.end - bounds[i].start
if mask_start >= 0 and mask_end > 0:
plus_mask.set_range( mask_start, mask_end )
minus_mask = plus_mask.copy()
random_regions = []
num_added = 0
for j in range( len( regions[i] ) ):
random_regions, added, plus_mask, minus_mask = add_random_region( regions_to_mimic[j], bounds[i], random_regions, plus_mask, minus_mask, overlaps )
if added:
num_added += 1
if num_added == len( regions_to_mimic ) or i_iters >= max_iters:
if len( best_regions ) > len( random_regions ):
random_regions = best_regions.copy()
num_fail += ( len( regions_to_mimic ) - len( random_regions ) )
i_iters = 0
best_regions = []
for region in random_regions:
print >>out_file, "%s\t%d\t%d\t%s\t%s\t%s" % ( region.chrom, region.start, region.end, "region_" + str( region_count ), "0", region.strand )
region_count += 1
else:
i -= 1
if len( best_regions ) < len( random_regions ):
best_regions = random_regions[:]
i+=1
out_file.close()
if num_fail:
print "After %i iterations, %i regions could not be added." % (max_iters, num_fail)
if use_mask == "use_mask":
print "The mask you have provided may be too restrictive."
class Region( list ):
    """
    A boolean occupancy mask over a span of positions.

    Each element is True when the corresponding position is covered
    (masked or already used) and False when it is free.
    """
    def __init__( self, size=0 ):
        """Create a mask of `size` positions, all initially free."""
        for i in range( size ):
            self.append( False )
    def copy( self ):
        """Return an independent deep copy of this mask."""
        return deepcopy( self )
    def set_range( self, start=0, end=None ):
        """Mark positions [start, end) as covered; end=None means len(self)."""
        if start < 0:
            start = 0
        if ( not end and end != 0 ) or end > len( self ):
            end = len( self )
        for i in range( start, end ):
            self[i]=True
    def count_range( self, start=0, end=None ):
        """Return how many positions in [start, end) are covered."""
        if start < 0:
            start = 0
        if ( not end and end != 0 ) or end > len( self ):
            end = len( self )
        return self[start:end].count( True )
    def get_gaps( self, min_size = 0 ):
        """Return free runs of at least min_size as (length, start, end)
        tuples, sorted longest-first."""
        gaps = []
        start = end = 0
        while True:
            # list.index raises ValueError when the value is absent; the
            # original used bare except clauses here, which also swallowed
            # KeyboardInterrupt and friends.
            try:
                start = self[end:].index( False ) + end
            except ValueError:
                break
            try:
                end = self[start:].index( True ) + start
            except ValueError:
                end = len( self )
            if end > start and end - start >= min_size:
                gaps.append( ( end - start, start, end ) )
        gaps.sort()
        gaps.reverse()
        return gaps
if __name__ == "__main__": main()
| volpino/Yeps-EURAC | tools/encode/random_intervals_no_bits.py | Python | mit | 10,644 | [
"Galaxy"
] | 243eb5cd34ff4fa884b8111b0c98e4457a846d6b799532e706a51ccf72a2ec5b |
import ast
from .base import BaseAnalyzer, Result
DESCRIPTION = """
``{name}`` function has been deprecated in Django 1.2 and removed in 1.4. Use
``{propose}`` class instead.
"""
class TemplateLoadersVisitor(ast.NodeVisitor):
    """AST visitor collecting string literals that name template-loader
    functions removed in Django 1.4."""

    # Maps each removed ``load_template_source`` function to the Loader
    # class that replaces it.
    removed_items = {
        'django.template.loaders.app_directories.load_template_source':
            'django.template.loaders.app_directories.Loader',
        'django.template.loaders.eggs.load_template_source':
            'django.template.loaders.eggs.Loader',
        'django.template.loaders.filesystem.load_template_source':
            'django.template.loaders.filesystem.Loader',
    }

    def __init__(self):
        self.found = []  # list of (matched string, AST node) pairs

    def visit_Str(self, node):
        # Membership test on the dict directly; the original's `.keys()`
        # call was redundant.
        if node.s in self.removed_items:
            self.found.append((node.s, node))
class TemplateLoadersAnalyzer(BaseAnalyzer):
    """Reports usages of template-loader functions removed in Django 1.4."""

    def analyze_file(self, filepath, code):
        """Yield a Result for every removed loader referenced in `code`."""
        if not isinstance(code, ast.AST):
            return
        visitor = TemplateLoadersVisitor()
        visitor.visit(code)
        for name, node in visitor.found:
            replacement = visitor.removed_items[name]
            result = Result(
                description = DESCRIPTION.format(name=name, propose=replacement),
                path = filepath,
                line = node.lineno)
            source_lines = self.get_file_lines(filepath, node.lineno, node.lineno)
            for lineno, important, text in source_lines:
                result.source.add_line(lineno, text, important)
                fixed = text.replace(name, replacement)
                result.solution.add_line(lineno, fixed, important)
            yield result
| beni55/djangolint | project/lint/analyzers/template_loaders.py | Python | isc | 1,600 | [
"VisIt"
] | 25d915b031cadb47473b708e9035bfc4ce27080514d6f4b6f8e50a6921cd589d |
# coding: utf-8
"""
Acceptance tests for Studio's Setting pages
"""
from __future__ import unicode_literals
import os
from textwrap import dedent
from bok_choy.promise import EmptyPromise
from mock import patch
from nose.plugins.attrib import attr
from base_studio_test import StudioCourseTest
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.common.utils import add_enrollment_course_modes
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage
from common.test.acceptance.pages.studio.settings import SettingsPage
from common.test.acceptance.pages.studio.settings_advanced import AdvancedSettingsPage
from common.test.acceptance.pages.studio.settings_group_configurations import GroupConfigurationsPage
from common.test.acceptance.pages.studio.utils import get_input_value
from common.test.acceptance.tests.helpers import create_user_partition_json, element_has_text
from xmodule.partitions.partitions import Group
@attr(shard=8)
class ContentGroupConfigurationTest(StudioCourseTest):
    """
    Tests for content groups in the Group Configurations Page.
    There are tests for the experiment groups in test_studio_split_test.
    """

    def setUp(self):
        super(ContentGroupConfigurationTest, self).setUp()
        # Page objects under test; both target the fixture course.
        self.group_configurations_page = GroupConfigurationsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.outline_page = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

    def populate_course_fixture(self, course_fixture):
        """
        Populates test course with chapter, sequential, and 1 problems.
        The problem is visible only to Group "alpha".
        """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit')
                )
            )
        )

    def create_and_verify_content_group(self, name, existing_groups):
        """
        Creates a new content group and verifies that it was properly created.

        `existing_groups` is the number of groups expected to already exist;
        returns the newly created group's page object.
        """
        self.assertEqual(existing_groups, len(self.group_configurations_page.content_groups))
        if existing_groups == 0:
            self.group_configurations_page.create_first_content_group()
        else:
            self.group_configurations_page.add_content_group()
        config = self.group_configurations_page.content_groups[existing_groups]
        config.name = name
        # Save the content group
        self.assertEqual(config.get_text('.action-primary'), "Create")
        self.assertFalse(config.delete_button_is_present)
        config.save()
        self.assertIn(name, config.name)
        return config

    def test_no_content_groups_by_default(self):
        """
        Scenario: Ensure that message telling me to create a new content group is
        shown when no content groups exist.
        Given I have a course without content groups
        When I go to the Group Configuration page in Studio
        Then I see "You have not created any content groups yet." message
        """
        self.group_configurations_page.visit()
        self.assertTrue(self.group_configurations_page.no_content_groups_message_is_present)
        self.assertIn(
            "You have not created any content groups yet.",
            self.group_configurations_page.no_content_groups_message_text
        )

    def test_can_create_and_edit_content_groups(self):
        """
        Scenario: Ensure that the content groups can be created and edited correctly.
        Given I have a course without content groups
        When I click button 'Add your first Content Group'
        And I set new the name and click the button 'Create'
        Then I see the new content is added and has correct data
        And I click 'New Content Group' button
        And I set the name and click the button 'Create'
        Then I see the second content group is added and has correct data
        When I edit the second content group
        And I change the name and click the button 'Save'
        Then I see the second content group is saved successfully and has the new name
        """
        self.group_configurations_page.visit()
        self.create_and_verify_content_group("New Content Group", 0)
        second_config = self.create_and_verify_content_group("Second Content Group", 1)
        # Edit the second content group
        second_config.edit()
        second_config.name = "Updated Second Content Group"
        self.assertEqual(second_config.get_text('.action-primary'), "Save")
        second_config.save()
        self.assertIn("Updated Second Content Group", second_config.name)

    def test_cannot_delete_used_content_group(self):
        """
        Scenario: Ensure that the user cannot delete used content group.
        Given I have a course with 1 Content Group
        And I go to the Group Configuration page
        When I try to delete the Content Group with name "New Content Group"
        Then I see the delete button is disabled.
        """
        # Define a cohort-scheme user partition with a single 'alpha' group,
        # then attach a problem to that group so the group counts as "used".
        self.course_fixture._update_xblock(self.course_fixture._course_location, {
            "metadata": {
                u"user_partitions": [
                    create_user_partition_json(
                        0,
                        'Configuration alpha,',
                        'Content Group Partition',
                        [Group("0", 'alpha')],
                        scheme="cohort"
                    )
                ],
            },
        })
        problem_data = dedent("""
            <problem markdown="Simple Problem" max_attempts="" weight="">
              <p>Choose Yes.</p>
              <choiceresponse>
                <checkboxgroup>
                  <choice correct="true">Yes</choice>
                </checkboxgroup>
              </choiceresponse>
            </problem>
        """)
        vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
        self.course_fixture.create_xblock(
            vertical.locator,
            XBlockFixtureDesc('problem', "VISIBLE TO ALPHA", data=problem_data, metadata={"group_access": {0: [0]}}),
        )
        self.group_configurations_page.visit()
        config = self.group_configurations_page.content_groups[0]
        self.assertTrue(config.delete_button_is_disabled)

    def test_can_delete_unused_content_group(self):
        """
        Scenario: Ensure that the user can delete unused content group.
        Given I have a course with 1 Content Group
        And I go to the Group Configuration page
        When I delete the Content Group with name "New Content Group"
        Then I see that there is no Content Group
        When I refresh the page
        Then I see that the content group has been deleted
        """
        self.group_configurations_page.visit()
        config = self.create_and_verify_content_group("New Content Group", 0)
        self.assertTrue(config.delete_button_is_present)
        self.assertEqual(len(self.group_configurations_page.content_groups), 1)
        # Delete content group
        config.delete()
        self.assertEqual(len(self.group_configurations_page.content_groups), 0)
        # Revisit to confirm the deletion was persisted server-side.
        self.group_configurations_page.visit()
        self.assertEqual(len(self.group_configurations_page.content_groups), 0)

    def test_must_supply_name(self):
        """
        Scenario: Ensure that validation of the content group works correctly.
        Given I have a course without content groups
        And I create new content group without specifying a name click the button 'Create'
        Then I see error message "Content Group name is required."
        When I set a name and click the button 'Create'
        Then I see the content group is saved successfully
        """
        self.group_configurations_page.visit()
        self.group_configurations_page.create_first_content_group()
        config = self.group_configurations_page.content_groups[0]
        config.save()
        self.assertEqual(config.mode, 'edit')
        self.assertEqual("Group name is required", config.validation_message)
        config.name = "Content Group Name"
        config.save()
        self.assertIn("Content Group Name", config.name)

    def test_can_cancel_creation_of_content_group(self):
        """
        Scenario: Ensure that creation of a content group can be canceled correctly.
        Given I have a course without content groups
        When I click button 'Add your first Content Group'
        And I set new the name and click the button 'Cancel'
        Then I see that there is no content groups in the course
        """
        self.group_configurations_page.visit()
        self.group_configurations_page.create_first_content_group()
        config = self.group_configurations_page.content_groups[0]
        config.name = "Content Group"
        config.cancel()
        self.assertEqual(0, len(self.group_configurations_page.content_groups))

    def test_content_group_empty_usage(self):
        """
        Scenario: When content group is not used, ensure that the link to outline page works correctly.
        Given I have a course without content group
        And I create new content group
        Then I see a link to the outline page
        When I click on the outline link
        Then I see the outline page
        """
        self.group_configurations_page.visit()
        config = self.create_and_verify_content_group("New Content Group", 0)
        config.toggle()
        config.click_outline_anchor()
        # Waiting for the page load and verify that we've landed on course outline page
        self.outline_page.wait_for_page()
@attr(shard=5)
class EnrollmentTrackModeTest(StudioCourseTest):
    """
    Tests for the Enrollment Tracks section of Studio's Group
    Configurations page when a course has one or more enrollment modes.
    """

    def setUp(self, is_staff=True, test_xss=True):
        # NOTE(review): test_xss is accepted but never used or forwarded --
        # presumably kept for signature compatibility with the base class;
        # confirm before removing.
        super(EnrollmentTrackModeTest, self).setUp(is_staff=is_staff)
        self.audit_track = "Audit"
        self.verified_track = "Verified"
        self.staff_user = self.user

    def test_all_course_modes_present(self):
        """
        This test is meant to ensure that all the course modes show up as groups
        on the Group configuration page within the Enrollment Tracks section.
        It also checks to make sure that the edit buttons are not available.
        """
        add_enrollment_course_modes(self.browser, self.course_id, ['audit', 'verified'])
        group_configurations_page = GroupConfigurationsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        group_configurations_page.visit()
        self.assertTrue(group_configurations_page.enrollment_track_section_present)
        # Make sure the edit buttons are not available.
        self.assertFalse(group_configurations_page.enrollment_track_edit_present)
        groups = group_configurations_page.get_enrollment_groups()
        for g in [self.audit_track, self.verified_track]:
            self.assertTrue(g in groups)

    def test_one_course_mode(self):
        """
        The purpose of this test is to ensure that when there is 1 or fewer course modes
        the enrollment track section is not shown.
        """
        add_enrollment_course_modes(self.browser, self.course_id, ['audit'])
        group_configurations_page = GroupConfigurationsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        group_configurations_page.visit()
        self.assertFalse(group_configurations_page.enrollment_track_section_present)
        groups = group_configurations_page.get_enrollment_groups()
        self.assertEqual(len(groups), 0)
@attr(shard=8)
class AdvancedSettingsValidationTest(StudioCourseTest):
"""
Tests for validation feature in Studio's advanced settings tab
"""
def setUp(self):
super(AdvancedSettingsValidationTest, self).setUp()
self.advanced_settings = AdvancedSettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.type_fields = ['Course Display Name', 'Advanced Module List', 'Discussion Topic Mapping',
'Maximum Attempts', 'Course Announcement Date']
# Before every test, make sure to visit the page first
self.advanced_settings.visit()
def test_modal_shows_one_validation_error(self):
"""
Test that advanced settings don't save if there's a single wrong input,
and that it shows the correct error message in the modal.
"""
# Feed an integer value for String field.
# .set method saves automatically after setting a value
course_display_name = self.advanced_settings.get('Course Display Name')
self.advanced_settings.set('Course Display Name', 1)
self.advanced_settings.wait_for_modal_load()
# Test Modal
self.check_modal_shows_correct_contents(['Course Display Name'])
self.advanced_settings.refresh_and_wait_for_load()
self.assertEquals(
self.advanced_settings.get('Course Display Name'),
course_display_name,
'Wrong input for Course Display Name must not change its value'
)
def test_modal_shows_multiple_validation_errors(self):
"""
Test that advanced settings don't save with multiple wrong inputs
"""
# Save original values and feed wrong inputs
original_values_map = self.get_settings_fields_of_each_type()
self.set_wrong_inputs_to_fields()
self.advanced_settings.wait_for_modal_load()
# Test Modal
self.check_modal_shows_correct_contents(self.type_fields)
self.advanced_settings.refresh_and_wait_for_load()
for key, val in original_values_map.iteritems():
self.assertEquals(
self.advanced_settings.get(key),
val,
'Wrong input for Advanced Settings Fields must not change its value'
)
def test_undo_changes(self):
"""
Test that undo changes button in the modal resets all settings changes
"""
# Save original values and feed wrong inputs
original_values_map = self.get_settings_fields_of_each_type()
self.set_wrong_inputs_to_fields()
# Let modal popup
self.advanced_settings.wait_for_modal_load()
# Click Undo Changes button
self.advanced_settings.undo_changes_via_modal()
# Check that changes are undone
for key, val in original_values_map.iteritems():
self.assertEquals(
self.advanced_settings.get(key),
val,
'Undoing Should revert back to original value'
)
def test_manual_change(self):
"""
Test that manual changes button in the modal keeps settings unchanged
"""
inputs = {"Course Display Name": 1,
"Advanced Module List": 1,
"Discussion Topic Mapping": 1,
"Maximum Attempts": '"string"',
"Course Announcement Date": '"string"',
}
self.set_wrong_inputs_to_fields()
self.advanced_settings.wait_for_modal_load()
self.advanced_settings.trigger_manual_changes()
# Check that the validation modal went away.
self.assertFalse(self.advanced_settings.is_validation_modal_present())
# Iterate through the wrong values and make sure they're still displayed
for key, val in inputs.iteritems():
self.assertEquals(
str(self.advanced_settings.get(key)),
str(val),
'manual change should keep: ' + str(val) + ', but is: ' + str(self.advanced_settings.get(key))
)
def check_modal_shows_correct_contents(self, wrong_settings_list):
"""
Helper function that checks if the validation modal contains correct
error messages.
"""
# Check presence of modal
self.assertTrue(self.advanced_settings.is_validation_modal_present())
# List of wrong settings item & what is presented in the modal should be the same
error_item_names = self.advanced_settings.get_error_item_names()
self.assertEqual(set(wrong_settings_list), set(error_item_names))
error_item_messages = self.advanced_settings.get_error_item_messages()
self.assertEqual(len(error_item_names), len(error_item_messages))
def get_settings_fields_of_each_type(self):
    """
    Return the current value of one representative field of each type:
    - String: Course Display Name
    - List: Advanced Module List
    - Dict: Discussion Topic Mapping
    - Integer: Maximum Attempts
    - Date: Course Announcement Date
    """
    field_names = (
        "Course Display Name",
        "Advanced Module List",
        "Discussion Topic Mapping",
        "Maximum Attempts",
        "Course Announcement Date",
    )
    return dict((name, self.advanced_settings.get(name)) for name in field_names)
def set_wrong_inputs_to_fields(self):
    """
    Feed each representative field a value of the wrong type so that
    validation is guaranteed to fail.
    """
    # Numbers for the string/list/dict fields, strings for the
    # integer/date fields.
    invalid_values = {
        "Course Display Name": 1,
        "Advanced Module List": 1,
        "Discussion Topic Mapping": 1,
        "Maximum Attempts": '"string"',
        "Course Announcement Date": '"string"',
    }
    self.advanced_settings.set_values(invalid_values)
def test_only_expected_fields_are_displayed(self):
    """
    Scenario: The Advanced Settings screen displays settings/fields not specifically hidden from
    view by a developer.
    Given I have a set of CourseMetadata fields defined for the course
    When I view the Advanced Settings screen for the course
    The total number of fields displayed matches the number I expect
    And the actual fields displayed match the fields I expect to see
    """
    shown = set(self.advanced_settings.displayed_settings_names)
    expected = set(self.advanced_settings.expected_settings_names)
    self.assertEquals(shown, expected)
@attr(shard=1)
class ContentLicenseTest(StudioCourseTest):
    """
    Tests for course-level licensing (that is, setting the license,
    for an entire course's content, to All Rights Reserved or Creative Commons)
    """

    def setUp(self):  # pylint: disable=arguments-differ
        super(ContentLicenseTest, self).setUp()
        # All Studio pages for the test course share the same org/number/run.
        course_args = (
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
        )
        self.outline_page = CourseOutlinePage(self.browser, *course_args)
        self.settings_page = SettingsPage(self.browser, *course_args)
        self.lms_courseware = CoursewarePage(
            self.browser,
            self.course_id,
        )
        self.settings_page.visit()

    def test_empty_license(self):
        """
        A fresh course is licensed "All Rights Reserved" in Studio by
        default, and the LMS courseware page shows the matching notice.
        """
        self.assertEqual(self.settings_page.course_license, "All Rights Reserved")
        self.lms_courseware.visit()
        self.assertEqual(self.lms_courseware.course_license, "© All Rights Reserved")

    def test_arr_license(self):
        """
        Explicitly selecting "All Rights Reserved" survives a save and a
        page refresh, and is reflected on the LMS courseware page.
        """
        self.settings_page.course_license = "All Rights Reserved"
        self.settings_page.save_changes()
        self.settings_page.refresh_and_wait_for_load()
        self.assertEqual(self.settings_page.course_license, "All Rights Reserved")
        self.lms_courseware.visit()
        self.assertEqual(self.lms_courseware.course_license, "© All Rights Reserved")

    def test_cc_license(self):
        """
        Selecting "Creative Commons" survives a save and a refresh, and the
        LMS shows a "Some Rights Reserved" notice.
        """
        self.settings_page.course_license = "Creative Commons"
        self.settings_page.save_changes()
        self.settings_page.refresh_and_wait_for_load()
        self.assertEqual(self.settings_page.course_license, "Creative Commons")
        self.lms_courseware.visit()
        # The course_license text will include a bunch of screen reader text
        # to explain the selected options, so only check containment here.
        self.assertIn("Some Rights Reserved", self.lms_courseware.course_license)
@attr('a11y')
class StudioSettingsA11yTest(StudioCourseTest):
    """
    Class to test Studio pages accessibility.
    """

    def setUp(self):  # pylint: disable=arguments-differ
        super(StudioSettingsA11yTest, self).setUp()
        self.settings_page = SettingsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
        )

    def test_studio_settings_page_a11y(self):
        """
        Run the accessibility audit against the Studio settings page.
        """
        self.settings_page.visit()
        self.settings_page.wait_for_page()
        audit_config = self.settings_page.a11y_audit.config
        audit_config.set_rules({
            "ignore": [
                'link-href',  # TODO: AC-590
            ],
        })
        self.settings_page.a11y_audit.check_for_accessibility_errors()
@attr('a11y')
class StudioSubsectionSettingsA11yTest(StudioCourseTest):
    """
    Class to test accessibility on the subsection settings modals.
    """

    def setUp(self):  # pylint: disable=arguments-differ
        # This test will fail if run using phantomjs < 2.0, due to an issue with bind()
        # See https://github.com/ariya/phantomjs/issues/10522 for details.
        # The course_outline uses this function, and as such will not fully load when run
        # under phantomjs 1.9.8. So, to prevent this test from timing out at course_outline.visit(),
        # force the use of firefox vs the standard a11y test usage of phantomjs 1.9.8.
        # TODO: remove this block once https://openedx.atlassian.net/browse/TE-1047 is resolved.
        browser = os.environ.get('SELENIUM_BROWSER', 'firefox')
        if browser == 'phantomjs':
            browser = 'firefox'
        with patch.dict(os.environ, {'SELENIUM_BROWSER': browser}):
            super(StudioSubsectionSettingsA11yTest, self).setUp(is_staff=True)
        self.course_outline = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
        )

    def populate_course_fixture(self, course_fixture):
        """Enable proctored exams and build a minimal chapter/subsection/problem tree."""
        course_fixture.add_advanced_settings({
            "enable_proctored_exams": {"value": "true"}
        })
        subsection = XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
            XBlockFixtureDesc('problem', 'Test Problem 1')
        )
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(subsection)
        )

    def test_special_exams_menu_a11y(self):
        """
        Given that I am a staff member
        And I am editing settings on the special exams menu
        Then that menu is accessible
        """
        self.course_outline.visit()
        self.course_outline.open_subsection_settings_dialog()
        self.course_outline.select_advanced_tab()
        # limit the scope of the audit to the special exams tab on the modal dialog
        self.course_outline.a11y_audit.config.set_scope(
            include=['section.edit-settings-timed-examination']
        )
        self.course_outline.a11y_audit.check_for_accessibility_errors()
@attr(shard=1)
class StudioSettingsImageUploadTest(StudioCourseTest):
    """
    Class to test course settings image uploads.

    The three image slots (course card, banner, video thumbnail) share the
    same upload-and-verify flow, factored into ``_upload_and_verify``.
    """

    def setUp(self):  # pylint: disable=arguments-differ
        super(StudioSettingsImageUploadTest, self).setUp()
        self.settings_page = SettingsPage(self.browser, self.course_info['org'], self.course_info['number'],
                                          self.course_info['run'])
        self.settings_page.visit()
        # Ensure jquery is loaded before running a jQuery
        self.settings_page.wait_for_ajax()
        # This text appears towards the end of the work that jQuery is performing on the page
        self.settings_page.wait_for_jquery_value('input#course-name:text', 'test_run')

    def _upload_and_verify(self, upload_selector, image_selector, file_to_upload='image.jpg'):
        """
        Upload ``file_to_upload`` via the control at ``upload_selector`` and
        assert the uploaded file's name appears in the path of the image
        element at ``image_selector``.
        """
        self.settings_page.upload_image(upload_selector, file_to_upload)
        self.assertIn(file_to_upload, self.settings_page.get_uploaded_image_path(image_selector))

    def test_upload_course_card_image(self):
        """An uploaded course card image is shown on the settings page."""
        self._upload_and_verify('#upload-course-image', '#course-image')

    def test_upload_course_banner_image(self):
        """An uploaded course banner image is shown on the settings page."""
        self._upload_and_verify('#upload-banner-image', '#banner-image')

    def test_upload_course_video_thumbnail_image(self):
        """An uploaded video thumbnail image is shown on the settings page."""
        self._upload_and_verify('#upload-video-thumbnail-image', '#video-thumbnail-image')
@attr(shard=1)
class CourseSettingsTest(StudioCourseTest):
    """
    Tests for the Schedule & Details (course settings) page: setting,
    clearing and persisting course and enrollment dates/times.
    """
    COURSE_START_DATE_CSS = "#course-start-date"
    COURSE_END_DATE_CSS = "#course-end-date"
    ENROLLMENT_START_DATE_CSS = "#course-enrollment-start-date"
    ENROLLMENT_END_DATE_CSS = "#course-enrollment-end-date"
    COURSE_START_TIME_CSS = "#course-start-time"
    COURSE_END_TIME_CSS = "#course-end-time"
    ENROLLMENT_START_TIME_CSS = "#course-enrollment-start-time"
    ENROLLMENT_END_TIME_CSS = "#course-enrollment-end-time"
    # Selector and text of the validation error shown when the course start
    # date is missing.
    ERROR_MESSAGE_CSS = '.message-error'
    START_DATE_ERROR_TEXT = "The course must have an assigned start date."
    course_start_date = '12/20/2013'
    course_end_date = '12/26/2013'
    enrollment_start_date = '12/01/2013'
    enrollment_end_date = '12/10/2013'
    dummy_time = "15:30"

    def setUp(self, is_staff=False, test_xss=True):
        # is_staff/test_xss are accepted for signature compatibility with the
        # base class but are not forwarded (matching the original behavior).
        super(CourseSettingsTest, self).setUp()
        self.settings_page = SettingsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        # Before every test, make sure to visit the page first
        self.settings_page.visit()
        self.ensure_input_fields_are_loaded()

    def set_course_dates(self):
        """
        Set dates for the course.
        """
        dates_dictionary = {
            self.COURSE_START_DATE_CSS: self.course_start_date,
            self.COURSE_END_DATE_CSS: self.course_end_date,
            self.ENROLLMENT_START_DATE_CSS: self.enrollment_start_date,
            self.ENROLLMENT_END_DATE_CSS: self.enrollment_end_date
        }
        self.settings_page.set_element_values(dates_dictionary)

    def ensure_input_fields_are_loaded(self):
        """
        Ensures values in input fields are loaded.
        """
        EmptyPromise(
            lambda: self.settings_page.q(css='#course-organization').attrs('value')[0],
            "Waiting for input fields to be loaded"
        ).fulfill()

    def _assert_field_values(self, css_selectors, expected_values):
        """
        Assert that each input identified in ``css_selectors`` currently
        holds the corresponding entry of ``expected_values``.
        """
        self.assertEqual(
            [get_input_value(self.settings_page, css_selector) for css_selector in css_selectors],
            expected_values
        )

    def _assert_start_date_error_present(self):
        """
        Wait for the validation error area and assert it reports the
        missing-start-date message.
        """
        self.settings_page.wait_for_element_presence(self.ERROR_MESSAGE_CSS, 'Error message is present')
        self.assertEqual(
            element_has_text(self.settings_page, self.ERROR_MESSAGE_CSS, self.START_DATE_ERROR_TEXT),
            True
        )

    def test_user_can_set_course_date(self):
        """
        Scenario: User can set course dates
        Given I have opened a new course in Studio
        When I select Schedule and Details
        And I set course dates
        And I press the "Save" notification button
        And I reload the page
        Then I see the set dates
        """
        # Set dates
        self.set_course_dates()
        # Set times
        time_dictionary = {
            self.COURSE_START_TIME_CSS: self.dummy_time,
            self.ENROLLMENT_END_TIME_CSS: self.dummy_time
        }
        self.settings_page.set_element_values(time_dictionary)
        # Save changes and reload the page.
        self.settings_page.save_changes()
        self.settings_page.refresh_and_wait_for_load()
        self.ensure_input_fields_are_loaded()
        # Assert changes have been persistent.
        self._assert_field_values(
            [self.COURSE_START_DATE_CSS, self.COURSE_END_DATE_CSS,
             self.ENROLLMENT_START_DATE_CSS, self.ENROLLMENT_END_DATE_CSS,
             self.COURSE_START_TIME_CSS, self.ENROLLMENT_END_TIME_CSS],
            [self.course_start_date, self.course_end_date,
             self.enrollment_start_date, self.enrollment_end_date,
             self.dummy_time, self.dummy_time]
        )

    def test_clear_previously_set_course_dates(self):
        """
        Scenario: User can clear previously set course dates (except start date)
        Given I have set course dates
        And I clear all the dates except start
        And I press the "Save" notification button
        And I reload the page
        Then I see cleared dates
        """
        # Set dates
        self.set_course_dates()
        # Clear all dates except start date
        values_to_set = {
            self.COURSE_END_DATE_CSS: '',
            self.ENROLLMENT_START_DATE_CSS: '',
            self.ENROLLMENT_END_DATE_CSS: ''
        }
        self.settings_page.set_element_values(values_to_set)
        # Save changes and refresh the page
        self.settings_page.save_changes()
        self.settings_page.refresh_and_wait_for_load()
        self.ensure_input_fields_are_loaded()
        # Assert changes have been persistent.
        self._assert_field_values(
            [self.COURSE_START_DATE_CSS, self.COURSE_END_DATE_CSS,
             self.ENROLLMENT_START_DATE_CSS, self.ENROLLMENT_END_DATE_CSS],
            [self.course_start_date, '', '', '']
        )

    def test_cannot_clear_the_course_start_date(self):
        """
        Scenario: User cannot clear the course start date
        Given I have set course dates
        And I press the "Save" notification button
        And I clear the course start date
        Then I receive a warning about course start date
        And I reload the page
        And the previously set start date is shown
        """
        # Set dates
        self.set_course_dates()
        # Save changes
        self.settings_page.save_changes()
        # Get default start date
        default_start_date = get_input_value(self.settings_page, self.COURSE_START_DATE_CSS)
        # Set course start date to empty
        self.settings_page.set_element_values({self.COURSE_START_DATE_CSS: ''})
        # Make sure error message is shown with appropriate message
        self._assert_start_date_error_present()
        # Refresh the page and assert start date has not changed.
        self.settings_page.refresh_and_wait_for_load()
        self.ensure_input_fields_are_loaded()
        self._assert_field_values(
            [self.COURSE_START_DATE_CSS],
            [default_start_date]
        )

    def test_user_can_correct_course_start_date_warning(self):
        """
        Scenario: User can correct the course start date warning
        Given I have tried to clear the course start
        And I have entered a new course start date
        And I press the "Save" notification button
        Then The warning about course start date goes away
        And I reload the page
        Then my new course start date is shown
        """
        # Set course start date to empty
        self.settings_page.set_element_values({self.COURSE_START_DATE_CSS: ''})
        # Make sure we get error message
        self._assert_start_date_error_present()
        # Set new course start value
        self.settings_page.set_element_values({self.COURSE_START_DATE_CSS: self.course_start_date})
        self.settings_page.un_focus_input_field()
        # Error message disappears
        self.settings_page.wait_for_element_absence(self.ERROR_MESSAGE_CSS, 'Error message is not present')
        # Save the changes and refresh the page.
        self.settings_page.save_changes()
        self.settings_page.refresh_and_wait_for_load()
        self.ensure_input_fields_are_loaded()
        # Assert changes are persistent.
        self._assert_field_values(
            [self.COURSE_START_DATE_CSS],
            [self.course_start_date]
        )

    def test_settings_are_only_persisted_when_saved(self):
        """
        Scenario: Settings are only persisted when saved
        Given I have set course dates
        And I press the "Save" notification button
        When I change fields
        And I reload the page
        Then I do not see the changes
        """
        # Set course dates.
        self.set_course_dates()
        # Save changes.
        self.settings_page.save_changes()
        default_value_enrollment_start_date = get_input_value(self.settings_page,
                                                              self.ENROLLMENT_START_TIME_CSS)
        # Set the value of enrollment start time and
        # reload the page without saving.
        self.settings_page.set_element_values({self.ENROLLMENT_START_TIME_CSS: self.dummy_time})
        self.settings_page.refresh_and_wait_for_load()
        self.ensure_input_fields_are_loaded()
        # Assert that the unsaved enrollment start time did not persist.
        self._assert_field_values(
            [self.COURSE_START_DATE_CSS, self.COURSE_END_DATE_CSS,
             self.ENROLLMENT_START_DATE_CSS, self.ENROLLMENT_END_DATE_CSS,
             self.ENROLLMENT_START_TIME_CSS],
            [self.course_start_date, self.course_end_date,
             self.enrollment_start_date, self.enrollment_end_date,
             default_value_enrollment_start_date]
        )

    def test_settings_are_reset_on_cancel(self):
        """
        Scenario: Settings are reset on cancel
        Given I have set course dates
        And I press the "Save" notification button
        When I change fields
        And I press the "Cancel" notification button
        Then I do not see the changes
        """
        # Set course date
        self.set_course_dates()
        # Save changes
        self.settings_page.save_changes()
        default_value_enrollment_start_date = get_input_value(self.settings_page,
                                                              self.ENROLLMENT_START_TIME_CSS)
        # Set value but don't save it.
        self.settings_page.set_element_values({self.ENROLLMENT_START_TIME_CSS: self.dummy_time})
        self.settings_page.click_button("cancel")
        # Make sure changes are not saved after cancel.
        self._assert_field_values(
            [self.COURSE_START_DATE_CSS, self.COURSE_END_DATE_CSS,
             self.ENROLLMENT_START_DATE_CSS, self.ENROLLMENT_END_DATE_CSS,
             self.ENROLLMENT_START_TIME_CSS],
            [self.course_start_date, self.course_end_date,
             self.enrollment_start_date, self.enrollment_end_date,
             default_value_enrollment_start_date]
        )

    def test_confirmation_is_shown_on_save(self):
        """
        Scenario: Confirmation is shown on save
        Given I have opened a new course in Studio
        When I select Schedule and Details
        And I change the "<field>" field to "<value>"
        And I press the "Save" notification button
        Then I see a confirmation that my changes have been saved
        """
        # Set date
        self.settings_page.set_element_values({self.COURSE_START_DATE_CSS: self.course_start_date})
        # save_changes() itself waits for the save confirmation, so no
        # further assertion is needed here.
        self.settings_page.save_changes()

    def test_changes_in_course_overview_show_a_confirmation(self):
        """
        Scenario: Changes in Course Overview show a confirmation
        Given I have opened a new course in Studio
        When I select Schedule and Details
        And I change the course overview
        And I press the "Save" notification button
        Then I see a confirmation that my changes have been saved
        """
        # Change the value of course overview
        self.settings_page.change_course_description('Changed overview')
        # save_changes() itself waits for the save confirmation, so no
        # further assertion is needed here.
        self.settings_page.save_changes()

    def test_user_cannot_save_invalid_settings(self):
        """
        Scenario: User cannot save invalid settings
        Given I have opened a new course in Studio
        When I select Schedule and Details
        And I change the "Course Start Date" field to ""
        Then the save notification button is disabled
        """
        # Change the course start date to invalid date.
        self.settings_page.set_element_values({self.COURSE_START_DATE_CSS: ''})
        # Confirm that save button is disabled.
        self.assertEqual(self.settings_page.is_element_present(".action-primary.action-save.is-disabled"), True)
| fintech-circle/edx-platform | common/test/acceptance/tests/studio/test_studio_settings.py | Python | agpl-3.0 | 39,646 | [
"VisIt"
] | f994cc91725a39fb4d48f60ab3d53c28a361f07ad10d51c39a24450133d51488 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Mozilla Firefox history database plugin."""
import collections
import unittest
# pylint: disable=unused-import
from plaso.formatters import firefox as firefox_formatter
from plaso.lib import event
from plaso.lib import eventdata
from plaso.lib import timelib_test
from plaso.parsers.sqlite_plugins import firefox
from plaso.parsers.sqlite_plugins import interface
from plaso.parsers.sqlite_plugins import test_lib
class FirefoxHistoryPluginTest(test_lib.SQLitePluginTestCase):
  """Tests for the Mozilla Firefox history database plugin.

  Covers both the pre-24 schema (places.sqlite) and the version 25
  schema (places_new.sqlite).
  """

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    # The plugin only needs a preprocess object; no other state is required.
    pre_obj = event.PreprocessObject()
    self._plugin = firefox.FirefoxHistoryPlugin(pre_obj)

  def testProcessPriorTo24(self):
    """Tests the Process function on a Firefox History database file."""
    # This is probably version 23 but potentially an older version.
    test_file = self._GetTestFilePath(['places.sqlite'])
    cache = interface.SQLiteCache()
    event_generator = self._ParseDatabaseFileWithPlugin(
        self._plugin, test_file, cache)
    event_objects = self._GetEventObjects(event_generator)
    # The places.sqlite file contains 205 events (1 page visit,
    # 2 x 91 bookmark records, 2 x 3 bookmark annotations,
    # 2 x 8 bookmark folders).
    # However there are three events that do not have a timestamp
    # so the test file will show 202 extracted events.
    self.assertEquals(len(event_objects), 202)
    # Check the first page visited event.
    event_object = event_objects[0]
    self.assertEquals(event_object.data_type, 'firefox:places:page_visited')
    self.assertEquals(event_object.timestamp_desc,
                      eventdata.EventTimestamp.PAGE_VISITED)
    expected_timestamp = timelib_test.CopyStringToTimestamp(
        '2011-07-01 11:16:21.371935')
    self.assertEquals(event_object.timestamp, expected_timestamp)
    expected_url = u'http://news.google.com/'
    self.assertEquals(event_object.url, expected_url)
    expected_title = u'Google News'
    self.assertEquals(event_object.title, expected_title)
    # The formatter combines URL, title, visit count, host and transition.
    expected_msg = (
        u'{} ({}) [count: 1] Host: news.google.com '
        u'(URL not typed directly) Transition: TYPED').format(
            expected_url, expected_title)
    expected_short = u'URL: {}'.format(expected_url)
    self._TestGetMessageStrings(event_object, expected_msg, expected_short)
    # Check the first bookmark event.
    event_object = event_objects[1]
    self.assertEquals(event_object.data_type, 'firefox:places:bookmark')
    self.assertEquals(event_object.timestamp_desc,
                      eventdata.EventTimestamp.ADDED_TIME)
    expected_timestamp = timelib_test.CopyStringToTimestamp(
        u'2011-07-01 11:13:59.266344+00:00')
    self.assertEquals(event_object.timestamp, expected_timestamp)
    # Check the second bookmark event.
    event_object = event_objects[2]
    self.assertEquals(event_object.data_type, 'firefox:places:bookmark')
    self.assertEquals(event_object.timestamp_desc,
                      eventdata.EventTimestamp.MODIFICATION_TIME)
    expected_timestamp = timelib_test.CopyStringToTimestamp(
        u'2011-07-01 11:13:59.267198+00:00')
    self.assertEquals(event_object.timestamp, expected_timestamp)
    expected_url = (
        u'place:folder=BOOKMARKS_MENU&folder=UNFILED_BOOKMARKS&folder=TOOLBAR&'
        u'sort=12&excludeQueries=1&excludeItemIfParentHasAnnotation=livemark%2F'
        u'feedURI&maxResults=10&queryType=1')
    self.assertEquals(event_object.url, expected_url)
    expected_title = u'Recently Bookmarked'
    self.assertEquals(event_object.title, expected_title)
    expected_msg = (
        u'Bookmark URL {} ({}) [folder=BOOKMARKS_MENU&'
        u'folder=UNFILED_BOOKMARKS&folder=TOOLBAR&sort=12&excludeQueries=1&'
        u'excludeItemIfParentHasAnnotation=livemark%2FfeedURI&maxResults=10&'
        u'queryType=1] visit count 0').format(
            expected_title, expected_url)
    expected_short = (
        u'Bookmarked Recently Bookmarked '
        u'(place:folder=BOOKMARKS_MENU&folder=UNFILED_BO...')
    self._TestGetMessageStrings(event_object, expected_msg, expected_short)
    # Check the first bookmark annotation event.
    event_object = event_objects[183]
    self.assertEquals(
        event_object.data_type, 'firefox:places:bookmark_annotation')
    self.assertEquals(
        event_object.timestamp_desc, eventdata.EventTimestamp.CREATION_TIME)
    expected_timestamp = timelib_test.CopyStringToTimestamp(
        u'2011-07-01 11:13:59.267146+00:00')
    self.assertEquals(event_object.timestamp, expected_timestamp)
    # Check another bookmark annotation event.
    event_object = event_objects[184]
    self.assertEquals(
        event_object.data_type, 'firefox:places:bookmark_annotation')
    self.assertEquals(
        event_object.timestamp_desc, eventdata.EventTimestamp.CREATION_TIME)
    expected_timestamp = timelib_test.CopyStringToTimestamp(
        u'2011-07-01 11:13:59.267605+00:00')
    self.assertEquals(event_object.timestamp, expected_timestamp)
    expected_url = (u'place:sort=14&type=6&maxResults=10&queryType=1')
    self.assertEquals(event_object.url, expected_url)
    expected_title = u'Recent Tags'
    self.assertEquals(event_object.title, expected_title)
    expected_msg = (
        u'Bookmark Annotation: [RecentTags] to bookmark '
        u'[{}] ({})').format(
            expected_title, expected_url)
    expected_short = u'Bookmark Annotation: Recent Tags'
    self._TestGetMessageStrings(event_object, expected_msg, expected_short)
    # Check the second last bookmark folder event.
    event_object = event_objects[200]
    self.assertEquals(event_object.data_type, 'firefox:places:bookmark_folder')
    self.assertEquals(
        event_object.timestamp_desc, eventdata.EventTimestamp.ADDED_TIME)
    expected_timestamp = timelib_test.CopyStringToTimestamp(
        u'2011-03-21 10:05:01.553774+00:00')
    self.assertEquals(event_object.timestamp, expected_timestamp)
    # Check the last bookmark folder event.
    event_object = event_objects[201]
    self.assertEquals(
        event_object.data_type, 'firefox:places:bookmark_folder')
    self.assertEquals(
        event_object.timestamp_desc,
        eventdata.EventTimestamp.MODIFICATION_TIME)
    expected_timestamp = timelib_test.CopyStringToTimestamp(
        u'2011-07-01 11:14:11.766851+00:00')
    self.assertEquals(event_object.timestamp, expected_timestamp)
    expected_title = u'Latest Headlines'
    self.assertEquals(event_object.title, expected_title)
    # Folder events format to just the folder title.
    expected_msg = expected_title
    expected_short = expected_title
    self._TestGetMessageStrings(event_object, expected_msg, expected_short)

  def testProcessVersion25(self):
    """Tests the Process function on a Firefox History database file v 25."""
    test_file = self._GetTestFilePath(['places_new.sqlite'])
    cache = interface.SQLiteCache()
    event_generator = self._ParseDatabaseFileWithPlugin(
        self._plugin, test_file, cache)
    event_objects = self._GetEventObjects(event_generator)
    # The places.sqlite file contains 84 events:
    #   34 page visits.
    #   28 bookmarks
    #   14 bookmark folders
    #   8 annotations
    self.assertEquals(len(event_objects), 84)
    # Tally events per data type and verify the expected distribution.
    counter = collections.Counter()
    for event_object in event_objects:
      counter[event_object.data_type] += 1
    self.assertEquals(counter['firefox:places:bookmark'], 28)
    self.assertEquals(counter['firefox:places:page_visited'], 34)
    self.assertEquals(counter['firefox:places:bookmark_folder'], 14)
    self.assertEquals(counter['firefox:places:bookmark_annotation'], 8)
    # Spot-check one page visit event in detail.
    random_event = event_objects[10]
    expected_timestamp = timelib_test.CopyStringToTimestamp(
        '2013-10-30 21:57:11.281942')
    self.assertEquals(random_event.timestamp, expected_timestamp)
    expected_short = u'URL: http://code.google.com/p/plaso'
    expected_msg = (
        u'http://code.google.com/p/plaso [count: 1] Host: code.google.com '
        u'(URL not typed directly) Transition: TYPED')
    self._TestGetMessageStrings(random_event, expected_msg, expected_short)
class FirefoxDownloadsPluginTest(test_lib.SQLitePluginTestCase):
  """Tests for the Mozilla Firefox downloads database plugin."""

  def setUp(self):
    """Sets up the plugin under test with a fresh preprocess object."""
    preprocess_object = event.PreprocessObject()
    self._plugin = firefox.FirefoxDownloadsPlugin(preprocess_object)

  def testProcessVersion25(self):
    """Tests the Process function on a Firefox Downloads database file."""
    database_file = self._GetTestFilePath(['downloads.sqlite'])
    database_cache = interface.SQLiteCache()
    generator = self._ParseDatabaseFileWithPlugin(
        self._plugin, database_file, database_cache)
    event_objects = self._GetEventObjects(generator)

    # The downloads.sqlite file contains 2 events (1 download).
    self.assertEquals(len(event_objects), 2)

    # Inspect the download start event in detail.
    download_event = event_objects[0]
    self.assertEquals(download_event.data_type, 'firefox:downloads:download')
    self.assertEquals(
        download_event.timestamp_desc, eventdata.EventTimestamp.START_TIME)
    expected_timestamp = timelib_test.CopyStringToTimestamp(
        u'2013-07-18 18:59:59.312000+00:00')
    self.assertEquals(download_event.timestamp, expected_timestamp)

    expected_url = (
        u'https://plaso.googlecode.com/files/'
        u'plaso-static-1.0.1-win32-vs2008.zip')
    self.assertEquals(download_event.url, expected_url)
    expected_full_path = u'file:///D:/plaso-static-1.0.1-win32-vs2008.zip'
    self.assertEquals(download_event.full_path, expected_full_path)

    # Received and total byte counts agree for a completed download.
    self.assertEquals(download_event.received_bytes, 15974599)
    self.assertEquals(download_event.total_bytes, 15974599)
# Allow this test module to be executed directly.
if __name__ == '__main__':
  unittest.main()
| iwm911/plaso | plaso/parsers/sqlite_plugins/firefox_test.py | Python | apache-2.0 | 10,639 | [
"VisIt"
] | d72f5ea44b9a51e2ac7de3e708e260cc4a25cd3a847321c3fd811b3f57038366 |
from __future__ import print_function
#import global_vars
import os
import exceptions
import smtplib
import email.mime.text
import copy
"""
class Classifier(object):
name = ""
l = [] #Classifier | ParamList
def __init__(self, label):
self.name = label
def prnt(self):
print("ClassifierName", self.name)
for el in self.l:
el.prnt()
"""
class myType:
    """
    Minimal descriptor for a parameter's type, identified by name.

    ``typeName`` holds the symbolic name (e.g. 'String', 'Boolean',
    'Connector'); ``tp`` holds the corresponding Python type for the
    names this class knows about, defaulting to ``type`` itself.
    """
    # Class-level defaults, kept for backward compatibility.
    typeName = ""
    tp = type(type)  # == type itself

    def __init__(self, name):
        self.typeName = name
        # Bug fix: the original assigned to a *local* variable ``tp`` here,
        # so the name-to-type mapping below never took effect.  Store the
        # resolved type on the instance instead.
        if self.typeName == 'String':
            self.tp = type('')      # str
        if self.typeName == 'Boolean':
            self.tp = type(False)   # bool

    def __str__(self):
        return self.typeName
class ParamList(object):
    """
    Ordered list of (parameter, type) pairs.

    Each entry pairs a parameter name with a myType-like object (anything
    exposing a ``typeName`` attribute).
    """
    l = []  # class-level default; shadowed by a per-instance list in __init__

    def __init__(self):
        self.l = []

    def addStr(self, param, tpList):
        """Append one (param, type) pair; connector types get a nested list."""
        if (tpList.typeName == 'Connector'):
            pass #proceed connectors of type list
            # NOTE(review): attaches an empty nested ParamList to the type
            # object for connector types.  The source dump lost indentation;
            # this line is assumed to belong inside the branch — confirm.
            tpList.pList = ParamList();
        self.l.append((param, tpList))

    def addTbl(self, tbl):
        """Append every (param, type) pair from another ParamList-like ``tbl``."""
        for el in tbl.l:
            self.addStr(el[0], el[1])

    def printMeTable(self, level):
        """Print each (param, type) pair, indented ``level`` tab stops."""
        for el in self.l:
            print(level*'\t', el[0], end=', ')
            print(el[1])
class TreeNode(object):
    """
    Node of a classifier tree: a ``label``, an optional ``paramList`` and a
    list of child ``TreeNode`` objects.
    """
    # Class-level defaults, kept for backward compatibility with code that
    # reads the attributes on the class itself.
    label = ""
    subNodeList = []
    paramList = ParamList()
    level = 0

    def __init__(self, par):
        # Always give the instance its own child list so nodes never share
        # the mutable class-level default (bug in the original: a node built
        # from anything other than a str/ParamList shared the class list).
        self.subNodeList = []
        if type(par) == type(""):
            # Initialised from a label string.
            self.label = par
        elif type(par) == type(self.paramList):
            # Initialised from a ParamList.
            self.paramList = par

    def addNode(self, Node):
        """Wrap ``Node`` in a TreeNode and append it as a child."""
        # Leftover debug print removed.
        self.subNodeList.append(TreeNode(Node))

    def addNodeList(self, SomeList):
        """Wrap each element of ``SomeList`` in a TreeNode and append them all."""
        # Leftover debug prints removed.
        self.subNodeList += [TreeNode(Node) for Node in SomeList]

    def printMe(self, level):
        """Print this node's label and parameter table, indented ``level`` tabs."""
        print(level*'\t'+"label:", self.label)
        print(level*'\t'+"paramListTable:")
        self.paramList.printMeTable(level)

    def printMeAndNodes(self, level):
        """Recursively print this node and its entire subtree."""
        self.printMe(level)
        print(level*'\t'+"subNodeList:")
        if self.subNodeList != []:
            for Node in self.subNodeList:
                Node.printMeAndNodes(level+1)
class ClassifierTree(object):
    """Tree of classifier sections hanging off a single 'root' node."""
    root = TreeNode('')  # class-level default, replaced per instance

    def __init__(self):
        self.root = TreeNode('root')
        section_names = [
            'Input section',
            'Required section',
            'Additional section',
            'Advanced section',
            'Output section',
            'Run section',
        ]
        self.root.addNodeList(section_names)

    def printTree(self):
        """Dump the whole tree to stdout."""
        self.root.printMeAndNodes(0)
class TypeTable(ParamList):
    """
    Flat table of (parameter, type) pairs gathered from a classifier tree.

    NOTE(review): both methods are unimplemented stubs; the script below
    builds its flat table with a plain ParamList instead — confirm intent.
    """
    l = []  # class-level default, as in ParamList
    #parName = ""
    #tp = myType('type')

    def __init__(self, tree):
        # Stub: the tree argument is accepted but not processed yet.
        pass

    def addTabl(self, ParamList):
        # Stub; note the parameter name shadows the ParamList class.
        pass
# --- Demo script: build a small classifier tree and populate two sections. ---
T = ClassifierTree()
#T.root.l[0].l
# Input section: one connector plus two boolean options.
PL1 = ParamList()
PL1.addStr('InputSequence', myType('Connector'))
PL1.addStr('ProduceDendrogram', myType('Boolean'))
PL1.addStr('Proceed with old dendrogram', myType('Boolean'))
T.root.subNodeList[0].addNode(PL1)
#T.root.l[1].l
# Required section: placeholder test parameters.
PL2 = ParamList()
PL2.addStr('TstParam1', myType('TstType1'))
PL2.addStr('TstParam2', myType('TstType2'))
PL2.addStr('TstParam3', myType('TstType3'))
T.root.subNodeList[1].addNode(PL2)
T.printTree()
# A third parameter list that is never attached to the tree.
P3 = ParamList()
P3.addStr('ololo1', myType('ololo1'))
P3.addStr('ololo2', myType('ololo2'))
# Flat table that will receive every (param, type) pair found in the tree.
typeTable = ParamList()
"""
typeTable.addTbl(PL1)
typeTable.addTbl(PL2)
typeTable.addTbl(P3)
typeTable.printMeTable(0)
"""
def gatherTypeTable(treeNode, typeTable):
    """
    Depth-first walk of ``treeNode`` that copies every parameter table found
    in the tree into the flat ``typeTable``.

    A node's ``paramList`` is harvested only when it has exactly the same
    type as ``typeTable`` (both ParamList in the script above); header nodes
    carry the shared class-level default ParamList, so they contribute their
    (empty) tables too.
    """
    # Dead "if subNodeList == []: pass" no-op and commented debug prints
    # from the original removed.
    if type(treeNode.paramList) == type(typeTable):
        typeTable.addTbl(treeNode.paramList)
    # Recurse into every child node.
    for eachNode in treeNode.subNodeList:
        gatherTypeTable(eachNode, typeTable)
# Flatten every parameter list in the demo tree into typeTable and show it.
gatherTypeTable(T.root, typeTable)
typeTable.printMeTable(0)
class ParamValues(object):
    """
    Ordered list of (parameter, value) pairs, mirroring ParamList.

    The original implementation did not parse (``checkValidity`` contained
    ``if type(l) = el.index``) and ``addStr`` referenced the undefined names
    ``Value`` and ``tpList``; both are repaired here with the minimal
    behaviour that can be grounded in the surrounding code.
    """
    l = []  # class-level default; shadowed by a per-instance list in __init__

    def __init__(self):
        self.l = []

    def addStr(self, param, value):
        """Append one (param, value) pair."""
        # NOTE(review): the original sketched special-casing for 'Connector'
        # values but used undefined names; a plain append is the only
        # behaviour that can be stated with confidence — confirm intent.
        self.l.append((param, value))

    def addTbl(self, tbl):
        """Append every (param, value) pair from another table-like object."""
        for el in tbl.l:
            self.addStr(el[0], el[1])

    def checkValidity(self, typeTable):
        """
        Return True when every stored parameter is declared in ``typeTable``
        (a ParamList-like object whose ``l`` holds (param, type) pairs).

        TODO: the original comment asked for a param->type map so each
        value's type could be checked too; only presence is verified so far.
        """
        declared = dict(typeTable.l)
        for param, value in self.l:
            if param not in declared:
                return False
        return True

    def printMeTable(self, level):
        """Print each (param, value) pair, indented ``level`` tab stops."""
        for el in self.l:
            print(level*'\t', el[0], end=', ')
            print(el[1])
"""
class ParametersPlan:
Nomination = "";
Code = 0;
PlanSpeciesParametersElement Owner;
def __init__(self)ParametersPlan();
#virtual ~ParametersPlan();
class PlanSpeciesParametersElement
Nomination = "";
Code = 0;
enum DataType;
def __init__(self)#PlanSpeciesParametersElement();
#virtual ~PlanSpeciesParametersElement();
class PlanSpeciesParameters
ElList = () #List<PlanSpeciesParametersElement*> list;
def __init__(self) #PlanSpeciesParameters();
#virtual ~PlanSpeciesParameters();
class PropertyValues
def __init__(self) PropertyValues();
#virtual ~PropertyValues();
class TaskPropertyValues
def __init__(self) TaskPropertyValues();
#virtual ~TaskPropertyValues();
class Task_exception(exceptions.Exception):
def __init__(self,message):
self.message=message
def __str__(self):
return " Task error:"+message
class Task:
def __init__(self,db_row):
self.user_id = db_row[0]
self.task_id = db_row[1]
self.algorithm = db_row[2]
self.num_procs = db_row[3]
self.duration_in_minutes = db_row[4]
self.task_status = db_row[5]
self.host = db_row[6]
self.path = db_row[7]
self.user_on_mult = db_row[8]
self.email = db_row[9]
self.priority_run = db_row[10]
self.priority_max = db_row[11]
self.running_time = db_row[12]
aligner self.queue_num = db_row[13]
self.db_set = db_row[14]
self.seq_type = ""
self.blast_outp_detail_lvl= 0
self.seq_simil_thrshld = 0
def init_blast_task(self,db_row):
if len(db_row)>0:
self.seq_type = db_row[0]
self.blast_outp_detail_lvl = db_row[1]
self.seq_simil_thrshld = db_row[2]
def upload_data(self):
if self.algorithm == "FitProt":
run_fitprot="cd %s/%d/%d; /home/romanenkov/fitprot/search_substs.py -p %s/%d/%d/structure.pdb -s %s/%d/%d/selection.txt -o %s/%d/%d/tmp > /home/romanenkov/aligner/ssh_run/fit_log.txt; scp %s/%d/%d/energy_array %s@%s:%s/%d.en" %\
(
global_vars.data_path,
self.user_id,
self.task_id,
global_vars.data_path,
self.user_id,
self.task_id,
global_vars.data_path,
self.user_id,
self.task_id,
global_vars.data_path,
self.user_id,
self.task_id,
global_vars.data_path,
self.user_id,
self.task_id,
self.user_on_mult,
self.host,
self.path,
self.task_id
)
#test="echo %s" %\
#(
# run_fitprot
#)
#os.system(test)
print run_fitprot
os.system(run_fitprot)
#string4="scp %s/%d/%d/energy_array %s@%s:%s/energy_array" %\
#(
# global_vars.data_path,
# self.user_id,
# self.task_id,
# self.host,
# self.path
#)
#os.system(string4)
string1="scp %s/%d/%d/structure.pdb %s@%s:%s/%d.pdb" %\
(
global_vars.data_path,
self.user_id,
self.task_id,
self.user_on_mult,
self.host,
self.path,
self.task_id
)
string2="scp %s/%d/%d/selection.txt %s@%s:%s/%d.txt" %\
(
global_vars.data_path,
self.user_id,
self.task_id,
self.user_on_mult,
self.host,
self.path,
self.task_id
)
print " Task.upload_data(): %s" %string1
status1=os.system(string1)
print " Task.upload_data(): %s" %string2
status2=os.system(string2)
if status1 or status2:
raise Task_exception("scp failed!")
else:
if self.algorithm == "nhunt":
#run_nhunt="cd %s/%d/%d; /home/romanenkov/nhunt/nhunt -i %s/%d/%d/sequences.fasta -d /home/romanenkov/nhunt/db.fasta; scp %s/%d/%d/nhunt.out %s@%s:%s/%d.out" %\
#(
# global_vars.data_path,
# self.user_id,
# self.task_id,
# global_vars.data_path,
# self.user_id,
# self.task_id,
# global_vars.data_path,
# self.user_id,
# self.task_id,
# self.user_on_mult,
# self.host,
# self.path,
# self.task_id
#)
#print run_nhunt
#status3=os.system(run_nhunt)
#print status3
#if status3:
# raise Task_exception("11scp failed!11")
print "1 done"
string1="scp %s/%d/%d/sequences.fasta %s@%s:%s/%d.fasta" %\
(
global_vars.data_path,
self.user_id,
self.task_id,
self.user_on_mult,
self.host,
self.path,
self.task_id
)
print "ready1"
print " Task.upload_data(): %s" %string1
status1=os.system(string1)
#print status1
#print "ready2"
if status1:
raise Task_exception("scp failed!")
#print "2 done"
#string2="scp %s/../nhunt/db.fasta %s@%s:%s/db%d.fasta" %\
#(
# global_vars.data_path,
# self.user_on_mult,
# self.host,
# self.path,
# self.task_id
#)
#print " Task.upload_data(): %s" %string2
#status2=os.system(string2)
#if status2:
# raise Task_exception("scp failed!")
#print "3 done"
else:
string="scp %s/%d/%d/sequences.fasta %s@%s:%s/%d.fasta" %\
(
global_vars.data_path,
self.user_id,
self.task_id,
self.user_on_mult,
self.host,
self.path,
self.task_id
)
print " Task.upload_data(): %s" %string
status=os.system(string)
if status:
raise Task_exception("scp failed!")
def run(self):
if self.algorithm == "nhunt":
string="ssh %s@%s \"cd %s; ./scheduler_make_align.sh %d %d %d.fasta %d '%s' %d \"" %\
(
self.user_on_mult,
self.host,
self.path,
self.task_id,
self.num_procs,
self.task_id,
self.duration_in_minutes,
self.algorithm,
self.db_set
)
else:
string="ssh %s@%s \"cd %s; ./scheduler_make_align.sh %d %d %d.fasta %d '%s'\"" %\
(
self.user_on_mult,
self.host,
self.path,
self.task_id,
self.num_procs,
self.task_id,
self.duration_in_minutes,
self.algorithm
)
print " Task.run(): %s" %string
status=os.system(string)
return status / 256
def check(self):
string="ssh %s@%s \"cd %s; ./scheduler_check_align.sh %d '%s'\"" %\
(
self.user_on_mult,
self.host,
self.path,
self.task_id,
self.algorithm
)
print string
status=os.system(string) / 256
print " Task.check(): status length = %d" % status
return status
def download_data(self):
string="scp -r %s@%s:%s/%d/\* %s/%d/%d/" %\
(
self.user_on_mult,
self.host,
self.path,
self.task_id,
global_vars.data_path,
self.user_id,
self.task_id
)
print " Task.download_data(): %s" %string
status=os.system(string)
if status:
raise Task_exception("scp failed!")
string="chmod -Rf g+wrX %s/%d/%d" %\
(
global_vars.data_path,
self.user_id,
self.task_id
)
status=os.system(string)
string="chgrp -Rf %s %s/%d/%d" %\
(
global_vars.local_group,
global_vars.data_path,
self.user_id,
self.task_id
)
status=os.system(string)
def clear_remote_data(self):
string="ssh %s@%s \"cd %s; ./clear_data_align.sh %d\"" %\
(
self.user_on_mult,
self.host,
self.path,
self.task_id,
)
print " ",string
status=os.system(string)
return status / 256
def remote_task_delete(self):
string="ssh %s@%s \"cd %s; ./scheduler_delete_align.sh %d\"" %\
(
self.user_on_mult,
self.host,
self.path,
self.task_id,
)
print " ",string
status=os.system(string)
return status / 256
def email_notify_on_finish(self,status):
msg_text=\
"""
'''
Dear user, your task with ID %d was finished on multiprocessor with status '%s'.
Please, visit page
https://%s/%s/pages/edit_task.php?task_id=%d
'''
'''
%\
(
self.task_id,
status,
global_vars.site_address,
global_vars.path_on_site,
self.task_id
)
msg= email.mime.text.MIMEText(msg_text)
msg['Subject']= "Information about state of the task with number %d on the Aligner website" % (self.task_id)
from_str="\"Aligner site administration\" <webmaster@%s>" % (global_vars.site_address)
msg['From']= from_str
msg['To']=self.email
server = smtplib.SMTP('localhost')
server.sendmail("webmaster@%s" %global_vars.site_address,[self.email],msg.as_string())
server.quit()
'''
| Abi1ity/uniclust2.0 | old_uniclust_model/task1.py | Python | bsd-3-clause | 12,191 | [
"VisIt"
] | ba5d4e9e8736d3982127b843a998959bd3232a4267f3d5ba3e68df0a9f429562 |
"""
Represents a diffraction setup using a shadow3 preprocessor (bragg) file
Except for energy, all units are in SI.
"""
import numpy
import scipy.constants as codata
from crystalpy.diffraction.DiffractionSetupAbstract import DiffractionSetupAbstract
from shadow4.physical_models.bragg.bragg import Bragg
from crystalpy.diffraction.GeometryType import BraggDiffraction
class S4DiffractionSetup(DiffractionSetupAbstract, Bragg):
def __init__(self, geometry_type=BraggDiffraction, crystal_name="", thickness=1e-6,
miller_h=1, miller_k=1, miller_l=1,
asymmetry_angle=0.0,
azimuthal_angle=0.0,
preprocessor_file=""):
"""
Constructor.
:param geometry_type: GeometryType (BraggDiffraction,...).
:param crystal_name: The name of the crystal, e.g. Si.
:param thickness: The crystal thickness.
:param miller_h: Miller index H.
:param miller_k: Miller index K.
:param miller_l: Miller index L.
:param asymmetry_angle: The asymmetry angle between surface normal and Bragg normal (radians).
:param azimuthal_angle: The angle between the projection of the Bragg normal
on the crystal surface plane and the x axis (radians).
"""
DiffractionSetupAbstract.__init__(self, geometry_type=geometry_type, crystal_name=crystal_name,
thickness=thickness,
miller_h=miller_h, miller_k=miller_k, miller_l=miller_l,
asymmetry_angle=asymmetry_angle, azimuthal_angle=azimuthal_angle)
Bragg.__init__(self, preprocessor_file=preprocessor_file, preprocessor_dictionary=None)
if preprocessor_file != "":
self.load_preprocessor_file()
#
# implementation of abstract methods uin DiffractionSetupAbstract
#
def F0(self, energy, ratio=None):
"""
Calculate F0 from Zachariasen.
:param energy: photon energy in eV.
:return: F0
"""
F_0, FH, FH_BAR, STRUCT, FA, FB = self.structure_factor(energy, ratio)
return F_0
def FH(self, energy, ratio=None):
"""
Calculate FH from Zachariasen.
:param energy: photon energy in eV.
:return: FH
"""
F_0, FH, FH_BAR, STRUCT, FA, FB = self.structure_factor(energy, ratio)
return FH
def FH_bar(self, energy, ratio=None):
"""
Calculate FH_bar from Zachariasen.
:param energy: photon energy in eV.
:return: FH_bar
"""
F_0, FH, FH_BAR, STRUCT, FA, FB = self.structure_factor(energy, ratio)
return FH_BAR
def dSpacing(self):
"""
Returns the lattice spacing d in A
:return: Lattice spacing.
"""
return self._preprocessor_dictionary["dspacing_in_cm"] * 1e8
def angleBragg(self, energy):
"""
Returns the Bragg angle for a given energy.
:param energy: Energy to calculate the Bragg angle for.
:return: Bragg angle.
"""
return numpy.arcsin( self.wavelength_in_A(energy) / 2 / self.dSpacing())
def unitcellVolume(self):
"""
Returns the unit cell volume.
:return: Unit cell volume
"""
# Retrieve unit cell volume from xraylib.
one_over_volume_times_electron_radius_in_cm = self.get_preprocessor_dictionary()["one_over_volume_times_electron_radius_in_cm"]
# codata_e2_mc2 = 2.81794032e-15 = Classical electron radius in S.I.
codata_e2_mc2 = codata.hbar * codata.alpha / codata.m_e / codata.c
one_over_volume_in_cm = one_over_volume_times_electron_radius_in_cm / (codata_e2_mc2 * 1e2)
unit_cell_volume = 1.0 / one_over_volume_in_cm * (1e-2)**3
return unit_cell_volume * (1e10)**3
if __name__ == "__main__":
from crystalpy.diffraction.GeometryType import BraggDiffraction
s4diffraction_setup = S4DiffractionSetup(geometry_type = BraggDiffraction,
crystal_name = "Si",
thickness = 100e-6,
miller_h = 1,
miller_k = 1,
miller_l = 1,
asymmetry_angle = 0.0,
azimuthal_angle = 0.0,
preprocessor_file="bragg_xop.dat")
print(s4diffraction_setup.info(8000)) | srio/minishadow | shadow4/physical_models/bragg/s4_diffraction_setup.py | Python | mit | 4,753 | [
"CRYSTAL"
] | 115f49e39ed4bff558b0066ae012749e4c3235fc75481ff4843f86ac1dfc0104 |
# The MIT License
#
# Copyright 2014, 2015 Piotr Dabkowski
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
from __future__ import unicode_literals
from pyjsparserdata import *
from std_nodes import *
from pprint import pprint
ESPRIMA_VERSION = '2.2.0'
DEBUG = False
# Small naming convention changes
# len -> leng
# id -> d
# type -> typ
# str -> st
true = True
false = False
null = None
class PyJsParser:
""" Usage:
parser = PyJsParser()
parser.parse('var JavaScriptCode = 5.1')
"""
def __init__(self):
self.clean()
def test(self, code):
pprint(self.parse(code))
def clean(self):
self.strict = None
self.sourceType = None
self.index = 0
self.lineNumber = 1
self.lineStart = 0
self.hasLineTerminator = None
self.lastIndex = None
self.lastLineNumber = None
self.lastLineStart = None
self.startIndex = None
self.startLineNumber = None
self.startLineStart = None
self.scanning = None
self.lookahead = None
self.state = None
self.extra = None
self.isBindingElement = None
self.isAssignmentTarget = None
self.firstCoverInitializedNameError = None
# 7.4 Comments
def skipSingleLineComment(self, offset):
start = self.index - offset;
while self.index < self.length:
ch = self.source[self.index];
self.index += 1
if isLineTerminator(ch):
if (ord(ch) == 13 and ord(self.source[self.index]) == 10):
self.index += 1
self.lineNumber += 1
self.hasLineTerminator = True
self.lineStart = self.index
return
def skipMultiLineComment(self):
while self.index < self.length:
ch = ord(self.source[self.index])
if isLineTerminator(ch):
if (ch == 0x0D and ord(self.source[self.index+1]) == 0x0A):
self.index += 1
self.lineNumber += 1
self.index += 1
self.hasLineTerminator = True
self.lineStart = self.index
elif ch == 0x2A:
# Block comment ends with '*/'.
if ord(self.source[self.index+1]) == 0x2F:
self.index += 2
return
self.index += 1
else:
self.index += 1
self.tolerateUnexpectedToken()
def skipComment(self):
self.hasLineTerminator = False
start = (self.index==0)
while self.index < self.length:
ch = ord(self.source[self.index])
if isWhiteSpace(ch):
self.index += 1
elif isLineTerminator(ch):
self.hasLineTerminator = True
self.index += 1
if (ch == 0x0D and ord(self.source[self.index]) == 0x0A):
self.index += 1
self.lineNumber += 1
self.lineStart = self.index
start = True
elif (ch == 0x2F): # U+002F is '/'
ch = ord(self.source[self.index+1])
if (ch == 0x2F):
self.index += 2
self.skipSingleLineComment(2)
start = True
elif (ch == 0x2A): # U+002A is '*'
self.index += 2
self.skipMultiLineComment()
else:
break
elif (start and ch == 0x2D): # U+002D is '-'
# U+003E is '>'
if (ord(self.source[self.index+1]) == 0x2D) and (ord(self.source[self.index+2]) == 0x3E):
# '-->' is a single-line comment
self.index += 3
self.skipSingleLineComment(3)
else:
break
elif (ch == 0x3C): # U+003C is '<'
if self.source[self.index+1: self.index+4]=='!--':
# <!--
self.index += 4
self.skipSingleLineComment(4)
else:
break
else:
break
def scanHexEscape(self, prefix):
code = 0
leng = 4 if (prefix == 'u') else 2
for i in xrange(leng):
if self.index < self.length and isHexDigit(self.source[self.index]):
ch = self.source[self.index]
self.index += 1
code = code * 16 + HEX_CONV[ch]
else:
return ''
return unichr(code)
def scanUnicodeCodePointEscape(self):
ch = self.source[self.index]
code = 0
# At least, one hex digit is required.
if ch == '}':
self.throwUnexpectedToken()
while (self.index < self.length):
ch = self.source[self.index]
self.index += 1
if not isHexDigit(ch):
break
code = code * 16 + HEX_CONV[ch]
if code > 0x10FFFF or ch != '}':
self.throwUnexpectedToken()
# UTF-16 Encoding
if (code <= 0xFFFF):
return unichr(code)
cu1 = ((code - 0x10000) >> 10) + 0xD800;
cu2 = ((code - 0x10000) & 1023) + 0xDC00;
return unichr(cu1)+unichr(cu2)
def ccode(self, offset=0):
return ord(self.source[self.index+offset])
def log_err_case(self):
if not DEBUG:
return
print 'INDEX', self.index
print self.source[self.index-10:self.index+10]
print
def at(self, loc):
return None if loc>=self.length else self.source[loc]
def substr(self, le, offset=0):
return self.source[self.index+offset:self.index+offset+le]
def getEscapedIdentifier(self):
d = self.source[self.index]
ch = ord(d)
self.index += 1
# '\u' (U+005C, U+0075) denotes an escaped character.
if (ch == 0x5C):
if (ord(self.source[self.index]) != 0x75):
self.throwUnexpectedToken()
self.index += 1
ch = self.scanHexEscape('u')
if not ch or ch == '\\' or not isIdentifierStart(ch[0]):
self.throwUnexpectedToken()
d = ch
while (self.index < self.length):
ch = self.ccode()
if not isIdentifierPart(ch):
break
self.index += 1
d += unichr(ch)
# '\u' (U+005C, U+0075) denotes an escaped character.
if (ch == 0x5C):
d = d[0: len(d)-1]
if (self.ccode() != 0x75):
self.throwUnexpectedToken()
self.index += 1
ch = self.scanHexEscape('u');
if (not ch or ch == '\\' or not isIdentifierPart(ch[0])):
self.throwUnexpectedToken()
d += ch
return d
def getIdentifier(self):
start = self.index
self.index += 1
while (self.index < self.length):
ch = self.ccode()
if (ch == 0x5C):
# Blackslash (U+005C) marks Unicode escape sequence.
self.index = start
return self.getEscapedIdentifier()
if (isIdentifierPart(ch)):
self.index += 1
else:
break
return self.source[start: self.index]
def scanIdentifier(self):
start = self.index
# Backslash (U+005C) starts an escaped character.
d = self.getEscapedIdentifier() if (self.ccode() == 0x5C) else self.getIdentifier()
# There is no keyword or literal with only one character.
# Thus, it must be an identifier.
if (len(d)==1):
type = Token.Identifier
elif (isKeyword(d)):
type = Token.Keyword
elif (d == 'null'):
type = Token.NullLiteral
elif (i == 'true' or d == 'false'):
type = Token.BooleanLiteral
else:
type = Token.Identifier;
return {
'type': type,
'value': d,
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index
}
# 7.7 Punctuators
def scanPunctuator(self):
token = {
'type': Token.Punctuator,
'value': '',
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': self.index,
'end': self.index
}
# Check for most common single-character punctuators.
st = self.source[self.index]
if st == '{':
self.state['curlyStack'].append('{')
self.index += 1
elif st == '}':
self.index += 1
self.state['curlyStack'].pop()
elif st in ['.', '(', ')', ';', ',', '[', ']', ':', '?', '~']:
self.index += 1
else:
# 4-character punctuator.
st = self.substr(4)
if (st == '>>>='):
self.index += 4
else:
# 3-character punctuators.
st = st[0:3]
if st in ['===', '!==', '>>>', '<<=', '>>=']:
self.index += 3
else:
# 2-character punctuators.
st = st[0:2]
if st in ['&&','||','==','!=','+=','-=','*=' ,'/=' ,'++' , '--' , '<<', '>>', '&=', '|=', '^=', '%=', '<=', '>=', '=>']:
self.index += 2
else:
# 1-character punctuators.
st = self.source[self.index]
if st in ['<', '>', '=', '!', '+', '-', '*', '%', '&', '|', '^', '/']:
self.index += 1
if self.index == token['start']:
self.throwUnexpectedToken()
token['end'] = self.index;
token['value'] = st
return token
# 7.8.3 Numeric Literals
def scanHexLiteral(self, start):
number = ''
while (self.index < self.length):
if (not isHexDigit(self.source[self.index])):
break
number += self.source[self.index]
self.index += 1
if not number:
self.throwUnexpectedToken()
if isIdentifierStart(self.ccode()):
self.throwUnexpectedToken()
return {
'type': Token.NumericLiteral,
'value': int(number, 16),
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index}
def scanBinaryLiteral(self, start):
number = ''
while (self.index < self.length):
ch = self.source[self.index]
if (ch != '0' and ch != '1'):
break
number += self.source[self.index]
self.index += 1
if not number:
# only 0b or 0B
self.throwUnexpectedToken()
if (self.index < self.length):
ch = self.source[self.index]
# istanbul ignore else
if (isIdentifierStart(ch) or isDecimalDigit(ch)):
self.throwUnexpectedToken();
return {
'type': Token.NumericLiteral,
'value': int(number, 2),
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index}
def scanOctalLiteral(self, prefix, start):
if isOctalDigit(prefix):
octal = True
number = '0' + self.source[self.index]
self.index += 1
else:
octal = False
self.index += 1
number = ''
while (self.index < self.length):
if (not isOctalDigit(self.source[self.index])):
break
number += self.source[self.index]
self.index += 1
if (not octal and not number):
# only 0o or 0O
self.throwUnexpectedToken()
if (isIdentifierStart(self.ccode()) or isDecimalDigit(self.ccode())):
self.throwUnexpectedToken()
return {
'type': Token.NumericLiteral,
'value': int(number, 8),
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index}
def octalToDecimal(self, ch):
# \0 is not octal escape sequence
octal = (ch != '0')
code = int(ch, 8)
if (self.index < self.length and isOctalDigit(self.source[self.index])):
octal = True
code = code * 8 + int(self.source[self.index], 8)
self.index += 1
# 3 digits are only allowed when string starts
# with 0, 1, 2, 3
if (ch in '0123' and self.index < self.length and isOctalDigit(self.source[self.index])):
code = code * 8 + int((self.source[self.index]), 8)
self.index += 1
return {
'code': code,
'octal': octal}
def isImplicitOctalLiteral(self):
# Implicit octal, unless there is a non-octal digit.
# (Annex B.1.1 on Numeric Literals)
for i in xrange(self.index + 1, self.length):
ch = self.source[i];
if (ch == '8' or ch == '9'):
return False;
if (not isOctalDigit(ch)):
return True
return True
def scanNumericLiteral(self):
ch = self.source[self.index]
assert isDecimalDigit(ch) or (ch == '.'), 'Numeric literal must start with a decimal digit or a decimal point'
start = self.index
number = ''
if ch != '.':
number = self.source[self.index]
self.index += 1
ch = self.source[self.index]
# Hex number starts with '0x'.
# Octal number starts with '0'.
# Octal number in ES6 starts with '0o'.
# Binary number in ES6 starts with '0b'.
if (number == '0'):
if (ch == 'x' or ch == 'X'):
self.index += 1
return self.scanHexLiteral(start);
if (ch == 'b' or ch == 'B'):
self.index += 1
return self.scanBinaryLiteral(start)
if (ch == 'o' or ch == 'O'):
return self.scanOctalLiteral(ch, start)
if (isOctalDigit(ch)):
if (self.isImplicitOctalLiteral()):
return self.scanOctalLiteral(ch, start);
while (isDecimalDigit(self.ccode())):
number += self.source[self.index]
self.index += 1
ch = self.source[self.index];
if (ch == '.'):
number += self.source[self.index]
self.index += 1
while (isDecimalDigit(self.source[self.index])):
number += self.source[self.index]
self.index += 1
ch = self.source[self.index]
if (ch == 'e' or ch == 'E'):
number += self.source[self.index]
self.index += 1
ch = self.source[self.index]
if (ch == '+' or ch == '-'):
number += self.source[self.index]
self.index += 1
if (isDecimalDigit(self.source[self.index])):
while (isDecimalDigit(self.source[self.index])):
number += self.source[self.index]
self.index += 1
else:
self.throwUnexpectedToken()
if (isIdentifierStart(self.source[self.index])):
self.throwUnexpectedToken();
return {
'type': Token.NumericLiteral,
'value': float(number),
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index}
# 7.8.4 String Literals
def _unescape_string(self, string):
'''Perform sctring escape - for regexp literals'''
self.index = 0
self.length = len(string)
self.source = string
self.lineNumber = 0
self.lineStart = 0
octal = False
st = ''
while (self.index < self.length):
ch = self.source[self.index]
self.index += 1
if ch == '\\':
ch = self.source[self.index]
self.index += 1
if (not isLineTerminator(ch)):
if ch in 'ux':
if (self.source[self.index] == '{'):
self.index += 1
st += self.scanUnicodeCodePointEscape()
else:
unescaped = self.scanHexEscape(ch)
if (not unescaped):
self.throwUnexpectedToken() # with throw I don't know whats the difference
st += unescaped
elif ch=='n':
st += '\n';
elif ch=='r':
st += '\r';
elif ch=='t':
st += '\t';
# elif ch=='b':
# st += '\b';
elif ch=='f':
st += '\f';
elif ch=='v':
st += '\x0B'
elif ch in '89':
self.throwUnexpectedToken() # again with throw....
else:
if isOctalDigit(ch):
octToDec = self.octalToDecimal(ch)
octal = octToDec['octal'] or octal
st += unichr(octToDec['code'])
else:
st += '\\' + ch # DONT ESCAPE!!!
else:
self.lineNumber += 1
if (ch == '\r' and self.source[self.index] == '\n'):
self.index += 1
self.lineStart = self.index
else:
st += ch
return st
def scanStringLiteral(self):
st = ''
octal = False
quote = self.source[self.index]
assert quote == '\''or quote == '"', 'String literal must starts with a quote'
start = self.index;
self.index += 1
while (self.index < self.length):
ch = self.source[self.index]
self.index += 1
if (ch == quote):
quote = ''
break
elif (ch == '\\'):
ch = self.source[self.index]
self.index += 1
if (not isLineTerminator(ch)):
if ch in 'ux':
if (self.source[self.index] == '{'):
self.index += 1
st += self.scanUnicodeCodePointEscape()
else:
unescaped = self.scanHexEscape(ch)
if (not unescaped):
self.throwUnexpectedToken() # with throw I don't know whats the difference
st += unescaped
elif ch=='n':
st += '\n';
elif ch=='r':
st += '\r';
elif ch=='t':
st += '\t';
elif ch=='b':
st += '\b';
elif ch=='f':
st += '\f';
elif ch=='v':
st += '\x0B'
elif ch in '89':
self.throwUnexpectedToken() # again with throw....
else:
if isOctalDigit(ch):
octToDec = self.octalToDecimal(ch)
octal = octToDec['octal'] or octal
st += unichr(octToDec['code'])
else:
st += ch
else:
self.lineNumber += 1
if (ch == '\r' and self.source[self.index] == '\n'):
self.index += 1
self.lineStart = self.index
elif isLineTerminator(ch):
break
else:
st += ch;
if (quote != ''):
self.throwUnexpectedToken()
return {
'type': Token.StringLiteral,
'value': st,
'octal': octal,
'lineNumber': self.lineNumber,
'lineStart': self.startLineStart,
'start': start,
'end': self.index}
def scanTemplate(self):
cooked = ''
terminated = False
tail = False
start = self.index
head = (self.source[self.index]=='`')
rawOffset = 2
self.index += 1
while (self.index < self.length):
ch = self.source[self.index]
self.index += 1
if (ch == '`'):
rawOffset = 1;
tail = True
terminated = True
break
elif (ch == '$'):
if (self.source[self.index] == '{'):
self.state['curlyStack'].append('${')
self.index += 1
terminated = True
break;
cooked += ch
elif (ch == '\\'):
ch = self.source[self.index]
self.index += 1
if (not isLineTerminator(ch)):
if ch=='n':
cooked += '\n'
elif ch=='r':
cooked += '\r'
elif ch=='t':
cooked += '\t'
elif ch in 'ux':
if (self.source[self.index] == '{'):
self.index += 1
cooked += self.scanUnicodeCodePointEscape()
else:
restore = self.index
unescaped = self.scanHexEscape(ch)
if (unescaped):
cooked += unescaped
else:
self.index = restore
cooked += ch
elif ch=='b':
cooked += '\b'
elif ch=='f':
cooked += '\f'
elif ch=='v':
cooked += '\v'
else:
if (ch == '0'):
if isDecimalDigit(self.ccode()):
# Illegal: \01 \02 and so on
self.throwError(Messages.TemplateOctalLiteral)
cooked += '\0'
elif (isOctalDigit(ch)):
# Illegal: \1 \2
self.throwError(Messages.TemplateOctalLiteral)
else:
cooked += ch
else:
self.lineNumber += 1
if (ch == '\r' and self.source[self.index] == '\n'):
self.index += 1
self.lineStart = self.index
elif (isLineTerminator(ch)):
self.lineNumber += 1
if (ch == '\r' and self.source[self.index] =='\n'):
self.index += 1
self.lineStart = self.index
cooked += '\n'
else:
cooked += ch;
if (not terminated):
self.throwUnexpectedToken()
if (not head):
self.state['curlyStack'].pop();
return {
'type': Token.Template,
'value': {
'cooked': cooked,
'raw': self.source[start + 1:self.index - rawOffset]},
'head': head,
'tail': tail,
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index}
def testRegExp(self, pattern, flags):
#todo: you should return python regexp object
return (pattern, flags)
def scanRegExpBody(self):
ch = self.source[self.index]
assert ch == '/', 'Regular expression literal must start with a slash'
st = ch
self.index += 1
classMarker = False
terminated = False
while (self.index < self.length):
ch = self.source[self.index]
self.index += 1
st += ch
if (ch == '\\'):
ch = self.source[self.index]
self.index += 1
# ECMA-262 7.8.5
if (isLineTerminator(ch)):
self.throwUnexpectedToken(None, Messages.UnterminatedRegExp)
st += ch
elif (isLineTerminator(ch)):
self.throwUnexpectedToken(None, Messages.UnterminatedRegExp)
elif (classMarker):
if (ch == ']'):
classMarker = False
else:
if (ch == '/'):
terminated = True
break
elif (ch == '['):
classMarker = True;
if (not terminated):
self.throwUnexpectedToken(None, Messages.UnterminatedRegExp)
# Exclude leading and trailing slash.
body = st[1:-1]
return {
'value': body,
'literal': st}
def scanRegExpFlags(self):
st = ''
flags = ''
while (self.index < self.length):
ch = self.source[self.index]
if (not isIdentifierPart(ch)):
break
self.index += 1
if (ch == '\\' and self.index < self.length):
ch = self.source[self.index]
if (ch == 'u'):
self.index += 1
restore = self.index
ch = self.scanHexEscape('u')
if (ch):
flags += ch
st += '\\u'
while restore < self.index:
st += self.source[restore]
restore += 1
else:
self.index = restore
flags += 'u'
st += '\\u'
self.tolerateUnexpectedToken()
else:
st += '\\'
self.tolerateUnexpectedToken()
else:
flags += ch
st += ch
return {
'value': flags,
'literal': st}
def scanRegExp(self):
self.scanning = True
self.lookahead = None
self.skipComment()
start = self.index
body = self.scanRegExpBody()
flags = self.scanRegExpFlags()
value = self.testRegExp(body['value'], flags['value'])
scanning = False
return {
'literal': body['literal'] + flags['literal'],
'value': value,
'regex': {
'pattern': body['value'],
'flags': flags['value']
},
'start': start,
'end': self.index}
def collectRegex(self):
self.skipComment();
return self.scanRegExp()
def isIdentifierName(self, token):
return token['type'] in [1,3,4,5]
#def advanceSlash(self): ???
def advance(self):
if (self.index >= self.length):
return {
'type': Token.EOF,
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': self.index,
'end': self.index}
ch = self.ccode()
if isIdentifierStart(ch):
token = self.scanIdentifier()
if (self.strict and isStrictModeReservedWord(token['value'])):
token['type'] = Token.Keyword
return token
# Very common: ( and ) and ;
if (ch == 0x28 or ch == 0x29 or ch == 0x3B):
return self.scanPunctuator()
# String literal starts with single quote (U+0027) or double quote (U+0022).
if (ch == 0x27 or ch == 0x22):
return self.scanStringLiteral()
# Dot (.) U+002E can also start a floating-point number, hence the need
# to check the next character.
if (ch == 0x2E):
if (isDecimalDigit(self.ccode(1))):
return self.scanNumericLiteral()
return self.scanPunctuator();
if (isDecimalDigit(ch)):
return self.scanNumericLiteral()
# Slash (/) U+002F can also start a regex.
#if (extra.tokenize && ch == 0x2F):
# return advanceSlash();
# Template literals start with ` (U+0060) for template head
# or } (U+007D) for template middle or template tail.
if (ch == 0x60 or (ch == 0x7D and self.state['curlyStack'][len(self.state['curlyStack']) - 1] == '${')):
return self.scanTemplate()
return self.scanPunctuator();
#def collectToken(self):
# loc = {
# 'start': {
# 'line': self.lineNumber,
# 'column': self.index - self.lineStart}}
#
# token = self.advance()
#
# loc['end'] = {
# 'line': self.lineNumber,
# 'column': self.index - self.lineStart}
# if (token['type'] != Token.EOF):
# value = self.source[token['start']: token['end']]
# entry = {
# 'type': TokenName[token['type']],
# 'value': value,
# 'range': [token['start'], token['end']],
# 'loc': loc}
# if (token.get('regex')):
# entry['regex'] = {
# 'pattern': token['regex']['pattern'],
# 'flags': token['regex']['flags']}
# self.extra['tokens'].append(entry)
# return token;
def lex(self):
self.scanning = True
self.lastIndex = self.index
self.lastLineNumber = self.lineNumber
self.lastLineStart = self.lineStart
self.skipComment()
token = self.lookahead
self.startIndex = self.index
self.startLineNumber = self.lineNumber
self.startLineStart = self.lineStart
self.lookahead = self.advance()
self.scanning = False
return token
    def peek(self):
        """Refresh ``self.lookahead`` without consuming it: skip comments,
        record the current positions, then scan the next token."""
        self.scanning = True
        self.skipComment()
        self.lastIndex = self.index
        self.lastLineNumber = self.lineNumber
        self.lastLineStart = self.lineStart
        self.startIndex = self.index
        self.startLineNumber = self.lineNumber
        self.startLineStart = self.lineStart
        self.lookahead = self.advance()
        self.scanning = False
    def createError(self, line, pos, description):
        """Build (but do not raise) a JS ``SyntaxError`` exception object
        carrying line, column, index and description details."""
        self.log_err_case()
        # Imported here rather than at module level - presumably to avoid an
        # import cycle with js2py.base (TODO confirm).
        from js2py.base import ERRORS, Js, JsToPyException
        error = ERRORS['SyntaxError']('Line ' + unicode(line) + ': ' + unicode(description))
        error.put('index', Js(pos))
        error.put('lineNumber', Js(line))
        error.put('column', Js(pos - (self.lineStart if self.scanning else self.lastLineStart) + 1))
        error.put('description', Js(description))
        return JsToPyException(error)
# Throw an exception
def throwError(self, messageFormat, *args):
msg = messageFormat % tuple(unicode(e) for e in args)
raise self.createError(self.lastLineNumber, self.lastIndex, msg);
def tolerateError(self, messageFormat, *args):
return self.throwError(messageFormat, *args)
# Throw an exception because of the token.
    def unexpectedTokenError(self, token={}, message=''):
        """Build the error object for an unexpected *token*, choosing a
        message matching the token's type when *message* is not given.
        NOTE(review): the ``{}`` default is never mutated, so sharing it
        between calls is safe here."""
        msg = message or Messages.UnexpectedToken
        if (token):
            typ = token['type']
            if (not message):
                # Pick a message matching the kind of token that was seen.
                if typ == Token.EOF: msg = Messages.UnexpectedEOS
                elif (typ == Token.Identifier): msg = Messages.UnexpectedIdentifier
                elif (typ == Token.NumericLiteral): msg = Messages.UnexpectedNumber
                elif (typ == Token.StringLiteral): msg = Messages.UnexpectedString
                elif (typ == Token.Template): msg = Messages.UnexpectedTemplate
                else: msg = Messages.UnexpectedToken;
                if (typ == Token.Keyword):
                    if (isFutureReservedWord(token['value'])):
                        msg = Messages.UnexpectedReserved
                    elif (self.strict and isStrictModeReservedWord(token['value'])):
                        msg = Messages.StrictReservedWord
            # Template tokens carry their text under value['raw'].
            value = token['value']['raw'] if (typ == Token.Template) else token.get('value')
        else:
            value = 'ILLEGAL'
        msg = msg.replace('%s', unicode(value))
        # Prefer the token's own position; fall back to the scanner position.
        return (self.createError(token['lineNumber'], token['start'], msg) if (token and token.get('lineNumber')) else
                self.createError(self.lineNumber if self.scanning else self.lastLineNumber, self.index if self.scanning else self.lastIndex, msg))
def throwUnexpectedToken(self, token={}, message=''):
raise self.unexpectedTokenError(token, message)
def tolerateUnexpectedToken(self, token={}, message=''):
self.throwUnexpectedToken(token, message)
# Expect the next token to match the specified punctuator.
# If not, an exception will be thrown.
def expect(self, value):
token = self.lex()
if (token['type'] != Token.Punctuator or token['value'] != value):
self.throwUnexpectedToken(token)
#/**
# * @name expectCommaSeparator
# * @description Quietly expect a comma when in tolerant mode, otherwise delegates
# * to <code>expect(value)</code>
# * @since 2.0
# */
def expectCommaSeparator(self):
self.expect(',')
# Expect the next token to match the specified keyword.
# If not, an exception will be thrown.
def expectKeyword(self, keyword):
token = self.lex();
if (token['type'] != Token.Keyword or token['value'] != keyword):
self.throwUnexpectedToken(token)
# Return true if the next token matches the specified punctuator.
def match(self, value):
return self.lookahead['type'] == Token.Punctuator and self.lookahead['value'] == value
# Return true if the next token matches the specified keyword
def matchKeyword(self, keyword):
return self.lookahead['type'] == Token.Keyword and self.lookahead['value'] == keyword
# Return true if the next token matches the specified contextual keyword
# (where an identifier is sometimes a keyword depending on the context)
def matchContextualKeyword(self, keyword):
return self.lookahead['type'] == Token.Identifier and self.lookahead['value'] == keyword
# Return true if the next token is an assignment operator
def matchAssign(self):
if (self.lookahead['type'] != Token.Punctuator):
return False;
op = self.lookahead['value']
return op in ['=','*=', '/=','%=', '+=', '-=', '<<=', '>>=', '>>>=', '&=' , '^=' , '|=']
    def consumeSemicolon(self):
        """Consume an explicit ';' or apply automatic semicolon insertion."""
        # Catch the very common case first: immediately a semicolon (U+003B).
        if (self.at(self.startIndex) == ';' or self.match(';')):
            self.lex()
            return
        # ASI: a line terminator before the next token ends the statement.
        if (self.hasLineTerminator):
            return
        # TODO: FIXME(ikarienator): this is seemingly an issue in the previous location info convention.
        self.lastIndex = self.startIndex
        self.lastLineNumber = self.startLineNumber
        self.lastLineStart = self.startLineStart
        # Otherwise only EOF or a closing '}' may legally end the statement.
        if (self.lookahead['type'] != Token.EOF and not self.match('}')):
            self.throwUnexpectedToken(self.lookahead)
# // Cover grammar support.
# //
# // When an assignment expression position starts with an left parenthesis, the determination of the type
# // of the syntax is to be deferred arbitrarily long until the end of the parentheses pair (plus a lookahead)
# // or the first comma. This situation also defers the determination of all the expressions nested in the pair.
# //
# // There are three productions that can be parsed in a parentheses pair that needs to be determined
# // after the outermost pair is closed. They are:
# //
# // 1. AssignmentExpression
# // 2. BindingElements
# // 3. AssignmentTargets
# //
# // In order to avoid exponential backtracking, we use two flags to denote if the production can be
# // binding element or assignment target.
# //
# // The three productions have the relationship:
# //
# // BindingElements <= AssignmentTargets <= AssignmentExpression
# //
# // with a single exception that CoverInitializedName when used directly in an Expression, generates
# // an early error. Therefore, we need the third state, firstCoverInitializedNameError, to track the
# // first usage of CoverInitializedName and report it when we reached the end of the parentheses pair.
# //
# // isolateCoverGrammar function runs the given parser function with a new cover grammar context, and it does not
# // effect the current flags. This means the production the parser parses is only used as an expression. Therefore
# // the CoverInitializedName check is conducted.
# //
# // inheritCoverGrammar function runs the given parse function with a new cover grammar context, and it propagates
# // the flags outside of the parser. This means the production the parser parses is used as a part of a potential
# // pattern. The CoverInitializedName check is deferred.
    def isolateCoverGrammar(self, parser):
        """Run *parser* in a fresh cover-grammar context. The production is
        used purely as an expression, so a pending CoverInitializedName error
        is reported here and the caller's flags are restored untouched."""
        oldIsBindingElement = self.isBindingElement
        oldIsAssignmentTarget = self.isAssignmentTarget
        oldFirstCoverInitializedNameError = self.firstCoverInitializedNameError
        self.isBindingElement = true
        self.isAssignmentTarget = true
        self.firstCoverInitializedNameError = null
        result = parser()
        if (self.firstCoverInitializedNameError != null):
            # A '{x = 1}' shorthand was seen where only an expression is legal.
            self.throwUnexpectedToken(self.firstCoverInitializedNameError)
        self.isBindingElement = oldIsBindingElement
        self.isAssignmentTarget = oldIsAssignmentTarget
        self.firstCoverInitializedNameError = oldFirstCoverInitializedNameError
        return result
    def inheritCoverGrammar(self, parser):
        """Run *parser* in a fresh cover-grammar context and propagate the
        resulting flags (ANDed) back to the caller; the CoverInitializedName
        check is deferred to an enclosing isolateCoverGrammar."""
        oldIsBindingElement = self.isBindingElement
        oldIsAssignmentTarget = self.isAssignmentTarget
        oldFirstCoverInitializedNameError = self.firstCoverInitializedNameError
        self.isBindingElement = true
        self.isAssignmentTarget = true
        self.firstCoverInitializedNameError = null
        result = parser()
        self.isBindingElement = self.isBindingElement and oldIsBindingElement
        self.isAssignmentTarget = self.isAssignmentTarget and oldIsAssignmentTarget
        # Keep the earliest recorded error.
        self.firstCoverInitializedNameError = oldFirstCoverInitializedNameError or self.firstCoverInitializedNameError
        return result
    def parseArrayPattern(self):
        """Parse an array destructuring pattern: ``[a, , b = 1, ...rest]``."""
        node = Node()
        elements = []
        self.expect('[');
        while (not self.match(']')):
            if (self.match(',')):
                # An elision (hole) is represented by null.
                self.lex()
                elements.append(null)
            else:
                if (self.match('...')):
                    # A rest element must be last - stop after it.
                    restNode = Node()
                    self.lex()
                    rest = self.parseVariableIdentifier()
                    elements.append(restNode.finishRestElement(rest))
                    break
                else:
                    elements.append(self.parsePatternWithDefault())
                if (not self.match(']')):
                    self.expect(',')
        self.expect(']')
        return node.finishArrayPattern(elements)
    def parsePropertyPattern(self):
        """Parse one property of an object pattern: shorthand (``{x}``),
        shorthand with default (``{x = 1}``), or ``key: pattern``."""
        node = Node()
        computed = self.match('[')
        if (self.lookahead['type'] == Token.Identifier):
            key = self.parseVariableIdentifier()
            if (self.match('=')):
                # Shorthand with default initializer.
                self.lex();
                init = self.parseAssignmentExpression()
                return node.finishProperty(
                    'init', key, false, WrappingNode(key).finishAssignmentPattern(key, init), false, false)
            elif (not self.match(':')):
                # Plain shorthand: key doubles as the bound name.
                return node.finishProperty('init', key, false, key, false, true)
        else:
            key = self.parseObjectPropertyKey()
        self.expect(':')
        init = self.parsePatternWithDefault()
        return node.finishProperty('init', key, computed, init, false, false)
    def parseObjectPattern(self):
        """Parse an object destructuring pattern: ``{a, b: c, d = 1}``."""
        node = Node()
        properties = []
        self.expect('{')
        while (not self.match('}')):
            properties.append(self.parsePropertyPattern())
            if (not self.match('}')):
                self.expect(',')
        # The loop exits on '}', so this lex() consumes the closing brace.
        self.lex()
        return node.finishObjectPattern(properties)
def parsePattern(self):
if (self.lookahead['type'] == Token.Identifier):
return self.parseVariableIdentifier()
elif (self.match('[')):
return self.parseArrayPattern()
elif (self.match('{')):
return self.parseObjectPattern()
self.throwUnexpectedToken(self.lookahead)
def parsePatternWithDefault(self):
startToken = self.lookahead
pattern = self.parsePattern()
if (self.match('=')):
self.lex()
right = self.isolateCoverGrammar(self.parseAssignmentExpression)
pattern = WrappingNode(startToken).finishAssignmentPattern(pattern, right)
return pattern
# 11.1.4 Array Initialiser
    def parseArrayInitialiser(self):
        """Parse an array literal ``[a, , ...b]`` (11.1.4)."""
        elements = []
        node = Node()
        self.expect('[')
        while (not self.match(']')):
            if (self.match(',')):
                # Elision (hole) is represented by null.
                self.lex()
                elements.append(null)
            elif (self.match('...')):
                restSpread = Node()
                self.lex()
                restSpread.finishSpreadElement(self.inheritCoverGrammar(self.parseAssignmentExpression))
                if (not self.match(']')):
                    # A spread followed by more elements cannot be a pattern.
                    self.isAssignmentTarget = self.isBindingElement = false
                    self.expect(',')
                elements.append(restSpread)
            else:
                elements.append(self.inheritCoverGrammar(self.parseAssignmentExpression))
                if (not self.match(']')):
                    self.expect(',')
        # The loop exits on ']', so this lex() consumes the closing bracket.
        self.lex();
        return node.finishArrayExpression(elements)
# 11.1.5 Object Initialiser
    def parsePropertyFunction(self, node, paramInfo):
        """Parse the body of a method/accessor whose parameters were already
        parsed into *paramInfo*; restores the strict-mode flag afterwards."""
        self.isAssignmentTarget = self.isBindingElement = false;
        previousStrict = self.strict;
        body = self.isolateCoverGrammar(self.parseFunctionSourceElements);
        # Restricted/reserved parameter names only error in strict code,
        # which may have been turned on by a directive inside the body.
        if (self.strict and paramInfo['firstRestricted']):
            self.tolerateUnexpectedToken(paramInfo['firstRestricted'], paramInfo.get('message'))
        if (self.strict and paramInfo['stricted']):
            self.tolerateUnexpectedToken(paramInfo['stricted'], paramInfo.get('message'));
        self.strict = previousStrict;
        return node.finishFunctionExpression(null, paramInfo['params'], paramInfo['defaults'], body)
def parsePropertyMethodFunction(self):
node = Node();
params = self.parseParams();
method = self.parsePropertyFunction(node, params);
return method;
    def parseObjectPropertyKey(self):
        """Parse a property key: string/number literal, identifier-like name,
        or computed ``[expr]`` key."""
        node = Node()
        token = self.lex();
        # // Note: This function is called only from parseObjectProperty(), where
        # // EOF and Punctuator tokens are already filtered out.
        typ = token['type']
        if typ in [Token.StringLiteral, Token.NumericLiteral]:
            if self.strict and token['octal']:
                self.tolerateUnexpectedToken(token, Messages.StrictOctalLiteral);
            return node.finishLiteral(token);
        elif typ in [Token.Identifier, Token.BooleanLiteral, Token.NullLiteral, Token.Keyword]:
            # Keywords and reserved words are valid property names.
            return node.finishIdentifier(token['value']);
        elif typ==Token.Punctuator:
            if (token['value'] == '['):
                # Computed key: [expr]
                expr = self.isolateCoverGrammar(self.parseAssignmentExpression)
                self.expect(']')
                return expr
        self.throwUnexpectedToken(token)
def lookaheadPropertyName(self):
typ = self.lookahead['type']
if typ in [Token.Identifier, Token.StringLiteral, Token.BooleanLiteral, Token.NullLiteral, Token.NumericLiteral, Token.Keyword]:
return true
if typ == Token.Punctuator:
return self.lookahead['value'] == '['
return false
# // This function is to try to parse a MethodDefinition as defined in 14.3. But in the case of object literals,
# // it might be called at a position where there is in fact a short hand identifier pattern or a data property.
# // This can only be determined after we consumed up to the left parentheses.
# //
# // In order to avoid back tracking, it returns `null` if the position is not a MethodDefinition and the caller
# // is responsible to visit other options.
    def tryParseMethodDefinition(self, token, key, computed, node):
        """Try to parse a MethodDefinition (getter, setter, or shorthand
        method). Returns null when the position is actually a plain data
        property, leaving the caller to handle it - no backtracking needed."""
        if (token['type'] == Token.Identifier):
            # check for `get` and `set`;
            if (token['value'] == 'get' and self.lookaheadPropertyName()):
                # Getter: get key() { ... } - no parameters allowed.
                computed = self.match('[');
                key = self.parseObjectPropertyKey()
                methodNode = Node()
                self.expect('(')
                self.expect(')')
                value = self.parsePropertyFunction(methodNode, {
                    'params': [],
                    'defaults': [],
                    'stricted': null,
                    'firstRestricted': null,
                    'message': null
                })
                return node.finishProperty('get', key, computed, value, false, false)
            elif (token['value'] == 'set' and self.lookaheadPropertyName()):
                # Setter: set key(param) { ... } - exactly one parameter.
                computed = self.match('[')
                key = self.parseObjectPropertyKey()
                methodNode = Node()
                self.expect('(')
                options = {
                    'params': [],
                    'defaultCount': 0,
                    'defaults': [],
                    'firstRestricted': null,
                    'paramSet': {}
                }
                if (self.match(')')):
                    # Missing setter parameter.
                    self.tolerateUnexpectedToken(self.lookahead);
                else:
                    self.parseParam(options);
                    if (options['defaultCount'] == 0):
                        options['defaults'] = []
                self.expect(')')
                value = self.parsePropertyFunction(methodNode, options);
                return node.finishProperty('set', key, computed, value, false, false);
        if (self.match('(')):
            # Shorthand method: key(params) { ... }
            value = self.parsePropertyMethodFunction();
            return node.finishProperty('init', key, computed, value, true, false)
        # Not a method definition.
        return null;
def checkProto(self, key, computed, hasProto):
if (computed == false and (key['type'] == Syntax.Identifier and key.name == '__proto__' or
key['type'] == Syntax.Literal and key.value == '__proto__')):
if (hasProto.value):
self.tolerateError(Messages.DuplicateProtoProperty);
else:
hasProto.value = true;
    def parseObjectProperty(self, hasProto):
        """Parse one property of an object literal: method/accessor,
        ``key: value``, shorthand, or shorthand-with-initializer (the latter
        only becomes legal if reinterpreted as a pattern)."""
        token = self.lookahead
        node = Node()
        computed = self.match('[');
        key = self.parseObjectPropertyKey();
        maybeMethod = self.tryParseMethodDefinition(token, key, computed, node)
        if (maybeMethod):
            self.checkProto(maybeMethod.key, maybeMethod.computed, hasProto);
            return maybeMethod;
        #// init property or short hand property.
        self.checkProto(key, computed, hasProto);
        if (self.match(':')):
            self.lex();
            value = self.inheritCoverGrammar(self.parseAssignmentExpression)
            return node.finishProperty('init', key, computed, value, false, false)
        if (token['type'] == Token.Identifier):
            if (self.match('=')):
                # CoverInitializedName: remember the token so the error can be
                # raised later if this turns out not to be a pattern.
                self.firstCoverInitializedNameError = self.lookahead;
                self.lex();
                value = self.isolateCoverGrammar(self.parseAssignmentExpression);
                return node.finishProperty('init', key, computed,
                    WrappingNode(token).finishAssignmentPattern(key, value), false, true)
            return node.finishProperty('init', key, computed, key, false, true)
        self.throwUnexpectedToken(self.lookahead)
    def parseObjectInitialiser(self):
        """Parse an object literal ``{ ... }`` (11.1.5)."""
        properties = []
        # One-slot record shared across properties to detect a duplicate
        # '__proto__' key (see checkProto).
        hasProto = {'value': false}
        node = Node();
        self.expect('{');
        while (not self.match('}')):
            properties.append(self.parseObjectProperty(hasProto));
            if (not self.match('}')):
                self.expectCommaSeparator()
        self.expect('}');
        return node.finishObjectExpression(properties)
def reinterpretExpressionAsPattern(self, expr):
typ = (expr['type'])
if typ in [Syntax.Identifier, Syntax.MemberExpression, Syntax.RestElement, Syntax.AssignmentPattern]:
pass
elif typ == Syntax.SpreadElement:
expr['type'] = Syntax.RestElement
self.reinterpretExpressionAsPattern(expr.argument)
elif typ == Syntax.ArrayExpression:
expr['type'] = Syntax.ArrayPattern
for i in xrange(len(expr['elements'])):
if (expr['elements'][i] != null):
self.reinterpretExpressionAsPattern(expr['elements'][i])
elif typ == Syntax.ObjectExpression:
expr['type'] = Syntax.ObjectPattern
for i in xrange(len(expr['properties'])):
self.reinterpretExpressionAsPattern(expr['properties'][i]['value']);
elif Syntax.AssignmentExpression:
expr['type'] = Syntax.AssignmentPattern;
self.reinterpretExpressionAsPattern(expr['left'])
else:
#// Allow other node type for tolerant parsing.
return
    def parseTemplateElement(self, option):
        """Parse one quasi (text chunk) of a template literal;
        ``option['head']`` requires the token to be a template head."""
        if (self.lookahead['type'] != Token.Template or (option['head'] and not self.lookahead['head'])):
            self.throwUnexpectedToken()
        node = Node();
        token = self.lex();
        # A template token's value carries both the raw and cooked text.
        return node.finishTemplateElement({ 'raw': token['value']['raw'], 'cooked': token['value']['cooked'] }, token['tail'])
    def parseTemplateLiteral(self):
        """Parse ``\`head ${expr} middle ... tail\``` into alternating quasis
        and embedded expressions."""
        node = Node()
        quasi = self.parseTemplateElement({ 'head': true })
        quasis = [quasi]
        expressions = []
        # Alternate expression / quasi until the tail quasi is reached.
        while (not quasi['tail']):
            expressions.append(self.parseExpression());
            quasi = self.parseTemplateElement({ 'head': false });
            quasis.append(quasi)
        return node.finishTemplateLiteral(quasis, expressions)
# 11.1.6 The Grouping Operator
    def parseGroupExpression(self):
        """Parse '(...)': either a parenthesised expression / comma sequence,
        or an arrow function's parameter list - in the latter case an
        ArrowParameterPlaceHolder dict is returned for parseAssignmentExpression
        to pick up (cover grammar)."""
        self.expect('(');
        if (self.match(')')):
            # '()' is only valid as an empty arrow parameter list.
            self.lex();
            if (not self.match('=>')):
                self.expect('=>')
            return {
                'type': PlaceHolders.ArrowParameterPlaceHolder,
                'params': []}
        startToken = self.lookahead
        if (self.match('...')):
            # '(...rest)' is likewise only valid as arrow parameters.
            expr = self.parseRestElement();
            self.expect(')');
            if (not self.match('=>')):
                self.expect('=>')
            return {
                'type': PlaceHolders.ArrowParameterPlaceHolder,
                'params': [expr]}
        self.isBindingElement = true;
        expr = self.inheritCoverGrammar(self.parseAssignmentExpression);
        if (self.match(',')):
            # Comma sequence: (a, b, ...) - may still turn into arrow params.
            self.isAssignmentTarget = false;
            expressions = [expr]
            while (self.startIndex < self.length):
                if (not self.match(',')):
                    break
                self.lex();
                if (self.match('...')):
                    # A rest element forces the arrow-parameter interpretation.
                    if (not self.isBindingElement):
                        self.throwUnexpectedToken(self.lookahead)
                    expressions.append(self.parseRestElement())
                    self.expect(')');
                    if (not self.match('=>')):
                        self.expect('=>');
                    self.isBindingElement = false
                    for i in xrange(len(expressions)):
                        self.reinterpretExpressionAsPattern(expressions[i])
                    return {
                        'type': PlaceHolders.ArrowParameterPlaceHolder,
                        'params': expressions}
                expressions.append(self.inheritCoverGrammar(self.parseAssignmentExpression))
            expr = WrappingNode(startToken).finishSequenceExpression(expressions);
        self.expect(')')
        if (self.match('=>')):
            if (not self.isBindingElement):
                self.throwUnexpectedToken(self.lookahead);
            # Reinterpret the grouped expression(s) as arrow parameters.
            if (expr['type'] == Syntax.SequenceExpression):
                for i in xrange(len(expr.expressions)):
                    self.reinterpretExpressionAsPattern(expr['expressions'][i])
            else:
                self.reinterpretExpressionAsPattern(expr);
            expr = {
                'type': PlaceHolders.ArrowParameterPlaceHolder,
                'params': expr['expressions'] if expr['type'] == Syntax.SequenceExpression else [expr]}
        self.isBindingElement = false
        return expr
# 11.1 Primary Expressions
def parsePrimaryExpression(self):
if (self.match('(')):
self.isBindingElement = false;
return self.inheritCoverGrammar(self.parseGroupExpression)
if (self.match('[')):
return self.inheritCoverGrammar(self.parseArrayInitialiser)
if (self.match('{')):
return self.inheritCoverGrammar(self.parseObjectInitialiser)
typ = self.lookahead['type']
node = Node();
if (typ == Token.Identifier):
expr = node.finishIdentifier(self.lex()['value']);
elif (typ == Token.StringLiteral or typ == Token.NumericLiteral):
self.isAssignmentTarget = self.isBindingElement = false
if (self.strict and self.lookahead.get('octal')):
self.tolerateUnexpectedToken(self.lookahead, Messages.StrictOctalLiteral)
expr = node.finishLiteral(self.lex())
elif (typ == Token.Keyword):
self.isAssignmentTarget = self.isBindingElement = false
if (self.matchKeyword('function')):
return self.parseFunctionExpression()
if (self.matchKeyword('this')):
self.lex()
return node.finishThisExpression()
if (self.matchKeyword('class')):
return self.parseClassExpression()
self.throwUnexpectedToken(self.lex())
elif (typ == Token.BooleanLiteral):
isAssignmentTarget = self.isBindingElement = false
token = self.lex();
token['value'] = (token['value'] == 'true')
expr = node.finishLiteral(token)
elif (typ == Token.NullLiteral):
self.isAssignmentTarget = self.isBindingElement = false
token = self.lex()
token['value'] = null;
expr = node.finishLiteral(token)
elif (self.match('/') or self.match('/=')):
self.isAssignmentTarget = self.isBindingElement = false;
self.index = self.startIndex;
token = self.scanRegExp(); # hehe, here you are!
self.lex();
expr = node.finishLiteral(token);
elif (typ == Token.Template):
expr = self.parseTemplateLiteral()
else:
self.throwUnexpectedToken(self.lex());
return expr;
# 11.2 Left-Hand-Side Expressions
def parseArguments(self):
args = [];
self.expect('(');
if (not self.match(')')):
while (self.startIndex < self.length):
args.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
if (self.match(')')):
break
self.expectCommaSeparator()
self.expect(')')
return args;
def parseNonComputedProperty(self):
node = Node()
token = self.lex();
if (not self.isIdentifierName(token)):
self.throwUnexpectedToken(token)
return node.finishIdentifier(token['value'])
def parseNonComputedMember(self):
self.expect('.')
return self.parseNonComputedProperty();
def parseComputedMember(self):
self.expect('[')
expr = self.isolateCoverGrammar(self.parseExpression)
self.expect(']')
return expr
def parseNewExpression(self):
node = Node()
self.expectKeyword('new')
callee = self.isolateCoverGrammar(self.parseLeftHandSideExpression)
args = self.parseArguments() if self.match('(') else []
self.isAssignmentTarget = self.isBindingElement = false
return node.finishNewExpression(callee, args)
    def parseLeftHandSideExpressionAllowCall(self):
        """Parse a left-hand-side expression where calls are allowed:
        '.name', '(args)', '[expr]' members and tagged templates, chained."""
        previousAllowIn = self.state['allowIn']
        startToken = self.lookahead;
        self.state['allowIn'] = true;
        if (self.matchKeyword('super') and self.state['inFunctionBody']):
            expr = Node();
            self.lex();
            expr = expr.finishSuper()
            # 'super' must be immediately followed by a call or member access.
            if (not self.match('(') and not self.match('.') and not self.match('[')):
                self.throwUnexpectedToken(self.lookahead);
        else:
            expr = self.inheritCoverGrammar(self.parseNewExpression if self.matchKeyword('new') else self.parsePrimaryExpression)
        while True:
            if (self.match('.')):
                self.isBindingElement = false;
                self.isAssignmentTarget = true;
                property = self.parseNonComputedMember();
                expr = WrappingNode(startToken).finishMemberExpression('.', expr, property)
            elif (self.match('(')):
                # A call result is never a valid assignment target.
                self.isBindingElement = false;
                self.isAssignmentTarget = false;
                args = self.parseArguments();
                expr = WrappingNode(startToken).finishCallExpression(expr, args)
            elif (self.match('[')):
                self.isBindingElement = false;
                self.isAssignmentTarget = true;
                property = self.parseComputedMember();
                expr = WrappingNode(startToken).finishMemberExpression('[', expr, property)
            elif (self.lookahead['type'] == Token.Template and self.lookahead['head']):
                quasi = self.parseTemplateLiteral()
                expr = WrappingNode(startToken).finishTaggedTemplateExpression(expr, quasi)
            else:
                break
        self.state['allowIn'] = previousAllowIn
        return expr
    def parseLeftHandSideExpression(self):
        """Parse a left-hand-side expression WITHOUT call expressions (used
        for a 'new' callee): member access and tagged templates only."""
        assert self.state['allowIn'], 'callee of new expression always allow in keyword.'
        startToken = self.lookahead
        if (self.matchKeyword('super') and self.state['inFunctionBody']):
            expr = Node();
            self.lex();
            expr = expr.finishSuper();
            # 'super' must be immediately followed by a member access here.
            if (not self.match('[') and not self.match('.')):
                self.throwUnexpectedToken(self.lookahead)
        else:
            expr = self.inheritCoverGrammar(self.parseNewExpression if self.matchKeyword('new') else self.parsePrimaryExpression);
        while True:
            if (self.match('[')):
                self.isBindingElement = false;
                self.isAssignmentTarget = true;
                property = self.parseComputedMember();
                expr = WrappingNode(startToken).finishMemberExpression('[', expr, property)
            elif (self.match('.')):
                self.isBindingElement = false;
                self.isAssignmentTarget = true;
                property = self.parseNonComputedMember();
                expr = WrappingNode(startToken).finishMemberExpression('.', expr, property);
            elif (self.lookahead['type'] == Token.Template and self.lookahead['head']):
                quasi = self.parseTemplateLiteral();
                expr = WrappingNode(startToken).finishTaggedTemplateExpression(expr, quasi)
            else:
                break
        return expr
# 11.3 Postfix Expressions
    def parsePostfixExpression(self):
        """Parse a LHS expression optionally followed by postfix ++/-- on the
        same line (11.3) - a line break disables the postfix reading (ASI)."""
        startToken = self.lookahead
        expr = self.inheritCoverGrammar(self.parseLeftHandSideExpressionAllowCall)
        if (not self.hasLineTerminator and self.lookahead['type'] == Token.Punctuator):
            if (self.match('++') or self.match('--')):
                # 11.3.1, 11.3.2
                # Strict mode forbids ++/-- on eval/arguments.
                if (self.strict and expr.type == Syntax.Identifier and isRestrictedWord(expr.name)):
                    self.tolerateError(Messages.StrictLHSPostfix)
                if (not self.isAssignmentTarget):
                    self.tolerateError(Messages.InvalidLHSInAssignment);
                self.isAssignmentTarget = self.isBindingElement = false;
                token = self.lex();
                expr = WrappingNode(startToken).finishPostfixExpression(token['value'], expr);
        return expr;
# 11.4 Unary Operators
    def parseUnaryExpression(self):
        """Parse prefix ++/--, unary operators (+ - ~ !) and
        delete/void/typeof (11.4); otherwise falls through to postfix."""
        if (self.lookahead['type'] != Token.Punctuator and self.lookahead['type'] != Token.Keyword):
            expr = self.parsePostfixExpression();
        elif (self.match('++') or self.match('--')):
            startToken = self.lookahead;
            token = self.lex();
            expr = self.inheritCoverGrammar(self.parseUnaryExpression);
            # 11.4.4, 11.4.5 - strict mode forbids ++/-- on eval/arguments.
            if (self.strict and expr.type == Syntax.Identifier and isRestrictedWord(expr.name)):
                self.tolerateError(Messages.StrictLHSPrefix)
            if (not self.isAssignmentTarget):
                self.tolerateError(Messages.InvalidLHSInAssignment)
            expr = WrappingNode(startToken).finishUnaryExpression(token['value'], expr)
            self.isAssignmentTarget = self.isBindingElement = false
        elif (self.match('+') or self.match('-') or self.match('~') or self.match('!')):
            startToken = self.lookahead;
            token = self.lex();
            expr = self.inheritCoverGrammar(self.parseUnaryExpression);
            expr = WrappingNode(startToken).finishUnaryExpression(token['value'], expr)
            self.isAssignmentTarget = self.isBindingElement = false;
        elif (self.matchKeyword('delete') or self.matchKeyword('void') or self.matchKeyword('typeof')):
            startToken = self.lookahead;
            token = self.lex();
            expr = self.inheritCoverGrammar(self.parseUnaryExpression);
            expr = WrappingNode(startToken).finishUnaryExpression(token['value'], expr);
            # Strict mode forbids deleting a plain variable reference.
            if (self.strict and expr.operator == 'delete' and expr.argument.type == Syntax.Identifier):
                self.tolerateError(Messages.StrictDelete)
            self.isAssignmentTarget = self.isBindingElement = false;
        else:
            expr = self.parsePostfixExpression()
        return expr
def binaryPrecedence(self, token, allowIn):
prec = 0;
typ = token['type']
if (typ != Token.Punctuator and typ != Token.Keyword):
return 0;
val = token['value']
if val == 'in' and not allowIn:
return 0
return PRECEDENCE.get(val, 0)
# 11.5 Multiplicative Operators
# 11.6 Additive Operators
# 11.7 Bitwise Shift Operators
# 11.8 Relational Operators
# 11.9 Equality Operators
# 11.10 Binary Bitwise Operators
# 11.11 Binary Logical Operators
    def parseBinaryExpression(self):
        """Parse binary-operator expressions (11.5-11.11) using an explicit
        operator-precedence (shift/reduce) stack; 'stack' alternates
        operand, operator-token, operand, ..."""
        marker = self.lookahead;
        left = self.inheritCoverGrammar(self.parseUnaryExpression);
        token = self.lookahead;
        prec = self.binaryPrecedence(token, self.state['allowIn']);
        if (prec == 0):
            # Not a binary expression at all.
            return left
        self.isAssignmentTarget = self.isBindingElement = false;
        token['prec'] = prec
        self.lex()
        markers = [marker, self.lookahead];
        right = self.isolateCoverGrammar(self.parseUnaryExpression);
        stack = [left, token, right];
        while True:
            prec = self.binaryPrecedence(self.lookahead, self.state['allowIn'])
            if not prec > 0:
                break
            # Reduce: make a binary expression from the three topmost entries.
            while ((len(stack) > 2) and (prec <= stack[len(stack) - 2]['prec'])):
                right = stack.pop();
                operator = stack.pop()['value']
                left = stack.pop()
                markers.pop()
                expr = WrappingNode(markers[len(markers) - 1]).finishBinaryExpression(operator, left, right)
                stack.append(expr)
            # Shift
            token = self.lex();
            token['prec'] = prec;
            stack.append(token);
            markers.append(self.lookahead);
            expr = self.isolateCoverGrammar(self.parseUnaryExpression);
            stack.append(expr);
        # Final reduce to clean-up the stack.
        i = len(stack) - 1;
        expr = stack[i]
        markers.pop()
        while (i > 1):
            expr = WrappingNode(markers.pop()).finishBinaryExpression(stack[i - 1]['value'], stack[i - 2], expr);
            i -= 2
        return expr
# 11.12 Conditional Operator
    def parseConditionalExpression(self):
        """Parse 'test ? consequent : alternate' (11.12)."""
        startToken = self.lookahead
        expr = self.inheritCoverGrammar(self.parseBinaryExpression);
        if (self.match('?')):
            self.lex()
            # 'in' is always allowed inside the consequent.
            previousAllowIn = self.state['allowIn']
            self.state['allowIn'] = true;
            consequent = self.isolateCoverGrammar(self.parseAssignmentExpression);
            self.state['allowIn'] = previousAllowIn;
            self.expect(':');
            alternate = self.isolateCoverGrammar(self.parseAssignmentExpression)
            expr = WrappingNode(startToken).finishConditionalExpression(expr, consequent, alternate);
            self.isAssignmentTarget = self.isBindingElement = false;
        return expr
# [ES6] 14.2 Arrow Function
def parseConciseBody(self):
if (self.match('{')):
return self.parseFunctionSourceElements()
return self.isolateCoverGrammar(self.parseAssignmentExpression)
    def checkPatternParam(self, options, param):
        """Recursively validate every identifier bound by pattern *param*,
        recording strict-mode / duplicate-name issues into *options* via
        validateParam."""
        typ = param.type
        if typ == Syntax.Identifier:
            self.validateParam(options, param, param.name);
        elif typ == Syntax.RestElement:
            self.checkPatternParam(options, param.argument)
        elif typ == Syntax.AssignmentPattern:
            self.checkPatternParam(options, param.left)
        elif typ == Syntax.ArrayPattern:
            for i in xrange(len(param.elements)):
                # Skip elisions (holes).
                if (param.elements[i] != null):
                    self.checkPatternParam(options, param.elements[i]);
        else:
            assert typ == Syntax.ObjectPattern, 'Invalid type'
            for i in xrange(len(param.properties)):
                self.checkPatternParam(options, param.properties[i].value);
    def reinterpretAsCoverFormalsList(self, expr):
        """Reinterpret *expr* (a lone identifier or an arrow-parameter
        placeholder) as an arrow function's formal parameter list; returns
        null when *expr* cannot serve as parameters."""
        defaults = [];
        defaultCount = 0;
        params = [expr];
        typ = expr.type
        if typ == Syntax.Identifier:
            pass
        elif typ == PlaceHolders.ArrowParameterPlaceHolder:
            params = expr.params
        else:
            return null
        options = {
            'paramSet': {}}
        le = len(params)
        for i in xrange(le):
            param = params[i]
            if param.type == Syntax.AssignmentPattern:
                # Split 'x = default' into the parameter and its default.
                params[i] = param.left;
                defaults.append(param.right);
                defaultCount += 1
                self.checkPatternParam(options, param.left);
            else:
                self.checkPatternParam(options, param);
                params[i] = param;
                defaults.append(null);
        if (options.get('message') == Messages.StrictParamDupe):
            token = options['stricted'] if self.strict else options['firstRestricted']
            self.throwUnexpectedToken(token, options.get('message'));
        if (defaultCount == 0):
            defaults = []
        # NOTE(review): 'stricted'/'firstRestricted' are indexed directly below,
        # so they are assumed to always be set by validateParam - confirm.
        return {
            'params': params,
            'defaults': defaults,
            'stricted': options['stricted'],
            'firstRestricted': options['firstRestricted'],
            'message': options.get('message')}
    def parseArrowFunctionExpression(self, options, node):
        """Parse '=>' and the arrow body, given already-checked formals in
        *options*; restores the strict-mode flag afterwards."""
        if (self.hasLineTerminator):
            # No line break is allowed before '=>'.
            self.tolerateUnexpectedToken(self.lookahead)
        self.expect('=>')
        previousStrict = self.strict;
        body = self.parseConciseBody();
        if (self.strict and options['firstRestricted']):
            self.throwUnexpectedToken(options['firstRestricted'], options.get('message'));
        if (self.strict and options['stricted']):
            # NOTE(review): direct ['message'] here (vs .get above) - assumed
            # 'message' is always present when 'stricted' is set; confirm.
            self.tolerateUnexpectedToken(options['stricted'], options['message']);
        self.strict = previousStrict
        # The last argument marks an expression (non-block) body.
        return node.finishArrowFunctionExpression(options['params'], options['defaults'], body, body.type != Syntax.BlockStatement)
# 11.13 Assignment Operators
    def parseAssignmentExpression(self):
        """Parse an assignment expression (11.13), including arrow functions
        detected via the cover-grammar placeholder."""
        startToken = self.lookahead;
        token = self.lookahead;
        expr = self.parseConditionalExpression();
        if (expr.type == PlaceHolders.ArrowParameterPlaceHolder or self.match('=>')):
            self.isAssignmentTarget = self.isBindingElement = false;
            lis = self.reinterpretAsCoverFormalsList(expr)
            if (lis):
                self.firstCoverInitializedNameError = null;
                return self.parseArrowFunctionExpression(lis, WrappingNode(startToken))
            return expr
        if (self.matchAssign()):
            if (not self.isAssignmentTarget):
                self.tolerateError(Messages.InvalidLHSInAssignment)
            # 11.13.1 - strict mode forbids assigning to eval/arguments.
            if (self.strict and expr.type == Syntax.Identifier and isRestrictedWord(expr.name)):
                self.tolerateUnexpectedToken(token, Messages.StrictLHSAssignment);
            if (not self.match('=')):
                self.isAssignmentTarget = self.isBindingElement = false;
            else:
                # Plain '=' may assign to a destructuring pattern.
                self.reinterpretExpressionAsPattern(expr)
            token = self.lex();
            right = self.isolateCoverGrammar(self.parseAssignmentExpression)
            expr = WrappingNode(startToken).finishAssignmentExpression(token['value'], expr, right);
            self.firstCoverInitializedNameError = null
        return expr
# 11.14 Comma Operator
    def parseExpression(self):
        """Parse an expression, folding a comma-separated series into a
        SequenceExpression (11.14)."""
        startToken = self.lookahead
        expr = self.isolateCoverGrammar(self.parseAssignmentExpression)
        if (self.match(',')):
            expressions = [expr];
            while (self.startIndex < self.length):
                if (not self.match(',')):
                    break
                self.lex();
                expressions.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
            expr = WrappingNode(startToken).finishSequenceExpression(expressions);
        return expr
# 12.1 Block
def parseStatementListItem(self):
    """Parse one StatementList item: a declaration or a statement.

    Declaration-introducing keywords are dispatched here; anything
    else falls through to parseStatement().  The non-standard
    'pyimport' keyword is a js2py extension.
    """
    if (self.lookahead['type'] == Token.Keyword):
        val = (self.lookahead['value'])
        if val=='export':
            if (self.sourceType != 'module'):
                # 'export' is only legal in module source
                self.tolerateUnexpectedToken(self.lookahead, Messages.IllegalExportDeclaration)
            return self.parseExportDeclaration();
        elif val == 'import':
            if (self.sourceType != 'module'):
                self.tolerateUnexpectedToken(self.lookahead, Messages.IllegalImportDeclaration);
            return self.parseImportDeclaration();
        elif val == 'const' or val == 'let':
            return self.parseLexicalDeclaration({'inFor': false});
        elif val == 'function':
            return self.parseFunctionDeclaration(Node());
        elif val == 'class':
            return self.parseClassDeclaration();
        elif val == 'pyimport': # <<<<< MODIFIED HERE
            # js2py extension: 'pyimport name;' imports a Python module
            return self.parsePyimportStatement()
    return self.parseStatement();
def parsePyimportStatement(self):
    """Parse the js2py extension statement ``pyimport <name>;``."""
    node = Node()
    self.lex()  # consume the 'pyimport' keyword
    module_name = self.parseVariableIdentifier()
    node.finishPyimport(module_name)
    self.consumeSemicolon()
    return node
def parseStatementList(self):
    """Parse statements until a closing '}' or end of input.

    Returns the list of parsed statement nodes.  The accumulator is
    named ``statements`` (the original shadowed the builtin ``list``).
    """
    statements = []
    while self.startIndex < self.length:
        if self.match('}'):
            break
        statements.append(self.parseStatementListItem())
    return statements
def parseBlock(self):
    """Parse a '{' StatementList '}' block (ECMA 12.1)."""
    node = Node()
    self.expect('{')
    body = self.parseStatementList()
    self.expect('}')
    return node.finishBlockStatement(body)
# 12.2 Variable Statement
def parseVariableIdentifier(self):
    """Parse and return an Identifier node.

    Strict-mode reserved words are tolerated (reported, but parsing
    continues); any other non-identifier token is a hard error.
    """
    node = Node()
    token = self.lex()
    if (token['type'] != Token.Identifier):
        if (self.strict and token['type'] == Token.Keyword and isStrictModeReservedWord(token['value'])):
            self.tolerateUnexpectedToken(token, Messages.StrictReservedWord);
        else:
            self.throwUnexpectedToken(token)
    return node.finishIdentifier(token['value'])
def parseVariableDeclaration(self):
    """Parse one declarator: a binding pattern plus an optional '= init'."""
    init = null
    node = Node();
    d = self.parsePattern();
    # 12.2.1: 'var eval/arguments' is an error in strict mode
    if (self.strict and isRestrictedWord(d.name)):
        self.tolerateError(Messages.StrictVarName);
    if (self.match('=')):
        self.lex();
        init = self.isolateCoverGrammar(self.parseAssignmentExpression);
    elif (d.type != Syntax.Identifier):
        # destructuring declarations require an initializer
        self.expect('=')
    return node.finishVariableDeclarator(d, init)
def parseVariableDeclarationList(self):
    """Parse one or more comma-separated variable declarators."""
    declarations = [self.parseVariableDeclaration()]
    while self.match(','):
        self.lex()  # consume ','
        if self.startIndex >= self.length:
            # trailing comma at end of input: stop here
            break
        declarations.append(self.parseVariableDeclaration())
    return declarations
def parseVariableStatement(self, node):
    """Parse a 'var' declaration statement (ECMA 12.2)."""
    self.expectKeyword('var')
    decls = self.parseVariableDeclarationList()
    self.consumeSemicolon()
    return node.finishVariableDeclaration(decls)
def parseLexicalBinding(self, kind, options):
    """Parse one let/const binding.

    'const' requires an initializer (except as a for-in target);
    destructuring 'let' outside a for-statement head does as well.
    """
    init = null
    node = Node()
    d = self.parsePattern();
    # 12.2.1: eval/arguments may not be bound in strict mode
    if (self.strict and d.type == Syntax.Identifier and isRestrictedWord(d.name)):
        self.tolerateError(Messages.StrictVarName);
    if (kind == 'const'):
        if (not self.matchKeyword('in')):
            # const must be initialised unless it is a for-in target
            self.expect('=')
            init = self.isolateCoverGrammar(self.parseAssignmentExpression)
    elif ((not options['inFor'] and d.type != Syntax.Identifier) or self.match('=')):
        self.expect('=');
        init = self.isolateCoverGrammar(self.parseAssignmentExpression);
    return node.finishVariableDeclarator(d, init)
def parseBindingList(self, kind, options):
    """Parse comma-separated let/const bindings.

    ``kind`` is 'let' or 'const'; ``options['inFor']`` relaxes the
    initializer requirement inside for-statement headers.  The
    accumulator is named ``bindings`` (the original shadowed the
    builtin ``list``).
    """
    bindings = []
    while True:
        bindings.append(self.parseLexicalBinding(kind, options))
        if not self.match(','):
            break
        self.lex()
        if not (self.startIndex < self.length):
            break
    return bindings
def parseLexicalDeclaration(self, options):
    """Parse a 'let' or 'const' declaration statement."""
    node = Node()
    kind = self.lex()['value']
    assert kind in ('let', 'const'), 'Lexical declaration must be either let or const'
    bindings = self.parseBindingList(kind, options)
    self.consumeSemicolon()
    return node.finishLexicalDeclaration(bindings, kind)
def parseRestElement(self):
    """Parse a rest parameter ('...name'); it must be a plain
    identifier, have no default, and be the last parameter."""
    node = Node()
    self.lex()  # consume '...'
    if self.match('{'):
        # object patterns are not allowed as rest parameters
        self.throwError(Messages.ObjectPatternAsRestParameter)
    param = self.parseVariableIdentifier()
    if self.match('='):
        self.throwError(Messages.DefaultRestParameter)
    if not self.match(')'):
        self.throwError(Messages.ParameterAfterRestParameter)
    return node.finishRestElement(param)
# 12.3 Empty Statement
def parseEmptyStatement(self, node):
    """Parse ';' as an EmptyStatement (ECMA 12.3)."""
    self.expect(';')
    return node.finishEmptyStatement()
# 12.4 Expression Statement
def parseExpressionStatement(self, node):
    """Parse an expression followed by an (optional, ASI) semicolon."""
    expression = self.parseExpression()
    self.consumeSemicolon()
    return node.finishExpressionStatement(expression)
# 12.5 If statement
def parseIfStatement(self, node):
    """Parse 'if (test) consequent [else alternate]' (ECMA 12.5)."""
    self.expectKeyword('if')
    self.expect('(')
    test = self.parseExpression()
    self.expect(')')
    consequent = self.parseStatement()
    alternate = null
    if self.matchKeyword('else'):
        self.lex()  # consume 'else'
        alternate = self.parseStatement()
    return node.finishIfStatement(test, consequent, alternate)
# 12.6 Iteration Statements
def parseDoWhileStatement(self, node):
    """Parse 'do body while (test)'; the closing ';' is optional."""
    self.expectKeyword('do')
    saved_in_iteration = self.state['inIteration']
    self.state['inIteration'] = true
    body = self.parseStatement()
    self.state['inIteration'] = saved_in_iteration
    self.expectKeyword('while')
    self.expect('(')
    test = self.parseExpression()
    self.expect(')')
    if self.match(';'):
        # the trailing semicolon of do/while may be omitted
        self.lex()
    return node.finishDoWhileStatement(body, test)
def parseWhileStatement(self, node):
    """Parse 'while (test) body' (ECMA 12.6.2)."""
    self.expectKeyword('while')
    self.expect('(')
    test = self.parseExpression()
    self.expect(')')
    saved_in_iteration = self.state['inIteration']
    self.state['inIteration'] = true  # enable break/continue in the body
    body = self.parseStatement()
    self.state['inIteration'] = saved_in_iteration
    return node.finishWhileStatement(test, body)
def parseForStatement(self, node):
    """Parse for(;;) and for-in statements (ECMA 12.6.3/12.6.4).

    Each of the three header forms (var, let/const, plain expression)
    checks for a following 'in' keyword to switch to for-in parsing.
    NOTE: whether the local name 'left' got bound is (ab)used via
    locals() below as the flag distinguishing plain for from for-in.
    """
    previousAllowIn = self.state['allowIn']
    init = test = update = null
    self.expectKeyword('for')
    self.expect('(')
    if (self.match(';')):
        # empty initializer: for (; ...; ...)
        self.lex()
    else:
        if (self.matchKeyword('var')):
            init = Node()
            self.lex()
            # 'in' must not be parsed as an operator inside the init
            self.state['allowIn'] = false;
            init = init.finishVariableDeclaration(self.parseVariableDeclarationList())
            self.state['allowIn'] = previousAllowIn
            if (len(init.declarations) == 1 and self.matchKeyword('in')):
                # for (var x in obj)
                self.lex()
                left = init
                right = self.parseExpression()
                init = null
            else:
                self.expect(';')
        elif (self.matchKeyword('const') or self.matchKeyword('let')):
            init = Node()
            kind = self.lex()['value']
            self.state['allowIn'] = false
            declarations = self.parseBindingList(kind, {'inFor': true})
            self.state['allowIn'] = previousAllowIn
            if (len(declarations) == 1 and declarations[0].init == null and self.matchKeyword('in')):
                # for (let/const x in obj)
                init = init.finishLexicalDeclaration(declarations, kind);
                self.lex();
                left = init;
                right = self.parseExpression();
                init = null;
            else:
                self.consumeSemicolon();
                init = init.finishLexicalDeclaration(declarations, kind);
        else:
            initStartToken = self.lookahead
            self.state['allowIn'] = false
            init = self.inheritCoverGrammar(self.parseAssignmentExpression);
            self.state['allowIn'] = previousAllowIn;
            if (self.matchKeyword('in')):
                if (not self.isAssignmentTarget):
                    self.tolerateError(Messages.InvalidLHSInForIn)
                self.lex();
                # for (expr in obj): the LHS becomes an assignment pattern
                self.reinterpretExpressionAsPattern(init);
                left = init;
                right = self.parseExpression();
                init = null;
            else:
                # comma-separated init expressions become a sequence
                if (self.match(',')):
                    initSeq = [init];
                    while (self.match(',')):
                        self.lex();
                        initSeq.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
                    init = WrappingNode(initStartToken).finishSequenceExpression(initSeq)
                self.expect(';');
    # only a plain for(;;) has test and update clauses
    if ('left' not in locals()):
        if (not self.match(';')):
            test = self.parseExpression();
        self.expect(';');
        if (not self.match(')')):
            update = self.parseExpression();
    self.expect(')');
    oldInIteration = self.state['inIteration']
    self.state['inIteration'] = true;
    body = self.isolateCoverGrammar(self.parseStatement)
    self.state['inIteration'] = oldInIteration;
    return node.finishForStatement(init, test, update, body) if ('left' not in locals()) else node.finishForInStatement(left, right, body);
# 12.7 The continue statement
def parseContinueStatement(self, node):
    """Parse 'continue [label];'; only legal inside an iteration body."""
    label = null
    self.expectKeyword('continue');
    # Optimize the most common form: 'continue;'. (0x3B is ';')
    if ord(self.source[self.startIndex]) == 0x3B:
        self.lex();
        if (not self.state['inIteration']):
            self.throwError(Messages.IllegalContinue)
        return node.finishContinueStatement(null)
    if (self.hasLineTerminator):
        # ASI: a newline terminates the bare 'continue'
        if (not self.state['inIteration']):
            self.throwError(Messages.IllegalContinue);
        return node.finishContinueStatement(null);
    if (self.lookahead['type'] == Token.Identifier):
        label = self.parseVariableIdentifier();
        key = '$' + label.name;
        # the label must refer to an enclosing labelled statement
        if not key in self.state['labelSet']: # todo make sure its correct!
            self.throwError(Messages.UnknownLabel, label.name);
    self.consumeSemicolon()
    if (label == null and not self.state['inIteration']):
        self.throwError(Messages.IllegalContinue)
    return node.finishContinueStatement(label)
# 12.8 The break statement
def parseBreakStatement(self, node):
    """Parse 'break [label];'; only legal inside a loop or switch."""
    label = null
    self.expectKeyword('break');
    # Catch the very common case first: immediately a semicolon (U+003B).
    if (ord(self.source[self.lastIndex]) == 0x3B):
        self.lex();
        if (not (self.state['inIteration'] or self.state['inSwitch'])):
            self.throwError(Messages.IllegalBreak)
        return node.finishBreakStatement(null)
    if (self.hasLineTerminator):
        # ASI: a newline terminates the bare 'break'
        if (not (self.state['inIteration'] or self.state['inSwitch'])):
            self.throwError(Messages.IllegalBreak);
        return node.finishBreakStatement(null);
    if (self.lookahead['type'] == Token.Identifier):
        label = self.parseVariableIdentifier();
        key = '$' + label.name;
        # the label must refer to an enclosing labelled statement
        if not (key in self.state['labelSet']):
            self.throwError(Messages.UnknownLabel, label.name);
    self.consumeSemicolon();
    if (label == null and not (self.state['inIteration'] or self.state['inSwitch'])):
        self.throwError(Messages.IllegalBreak)
    return node.finishBreakStatement(label);
# 12.9 The return statement
def parseReturnStatement(self, node):
    """Parse 'return [expr];'; only legal inside a function body."""
    argument = null;
    self.expectKeyword('return');
    if (not self.state['inFunctionBody']):
        self.tolerateError(Messages.IllegalReturn);
    # 'return' followed by a space and an identifier is very common.
    # (0x20 is the space character)
    if (ord(self.source[self.lastIndex]) == 0x20):
        if (isIdentifierStart(self.source[self.lastIndex + 1])):
            argument = self.parseExpression();
            self.consumeSemicolon();
            return node.finishReturnStatement(argument)
    if (self.hasLineTerminator):
        # HACK: ASI — a newline right after 'return' ends the statement
        return node.finishReturnStatement(null)
    if (not self.match(';')):
        if (not self.match('}') and self.lookahead['type'] != Token.EOF):
            argument = self.parseExpression();
    self.consumeSemicolon();
    return node.finishReturnStatement(argument);
# 12.10 The with statement
def parseWithStatement(self, node):
    """Parse 'with (obj) body'; forbidden in strict mode (ECMA 12.10)."""
    if self.strict:
        self.tolerateError(Messages.StrictModeWith)
    self.expectKeyword('with')
    self.expect('(')
    obj = self.parseExpression()
    self.expect(')')
    body = self.parseStatement()
    return node.finishWithStatement(obj, body)
# 12.11 The switch statement
def parseSwitchCase(self):
    """Parse one 'case expr:' or 'default:' clause of a switch body."""
    node = Node()
    if self.matchKeyword('default'):
        self.lex()
        test = null  # a null test marks the default clause
    else:
        self.expectKeyword('case')
        test = self.parseExpression()
    self.expect(':')
    body = []
    # statements belong to this clause until the next case/default/'}'
    while self.startIndex < self.length:
        if self.match('}') or self.matchKeyword('default') or self.matchKeyword('case'):
            break
        body.append(self.parseStatementListItem())
    return node.finishSwitchCase(test, body)
def parseSwitchStatement(self, node):
    """Parse a switch statement; rejects duplicate 'default' clauses."""
    self.expectKeyword('switch')
    self.expect('(')
    discriminant = self.parseExpression()
    self.expect(')')
    self.expect('{')
    cases = []
    if self.match('}'):
        # empty switch body
        self.lex()
        return node.finishSwitchStatement(discriminant, cases)
    saved_in_switch = self.state['inSwitch']
    self.state['inSwitch'] = true  # 'break' becomes legal inside clauses
    seen_default = false
    while self.startIndex < self.length:
        if self.match('}'):
            break
        clause = self.parseSwitchCase()
        if clause.test == null:
            if seen_default:
                self.throwError(Messages.MultipleDefaultsInSwitch)
            seen_default = true
        cases.append(clause)
    self.state['inSwitch'] = saved_in_switch
    self.expect('}')
    return node.finishSwitchStatement(discriminant, cases)
# 12.13 The throw statement
def parseThrowStatement(self, node):
    """Parse 'throw expr;'; a newline right after 'throw' is illegal."""
    self.expectKeyword('throw')
    if self.hasLineTerminator:
        self.throwError(Messages.NewlineAfterThrow)
    argument = self.parseExpression()
    self.consumeSemicolon()
    return node.finishThrowStatement(argument)
# 12.14 The try statement
def parseCatchClause(self):
    """Parse 'catch (param) { ... }' (ECMA 12.14)."""
    node = Node()
    self.expectKeyword('catch')
    self.expect('(')
    if self.match(')'):
        # 'catch ()' with no binding is a syntax error
        self.throwUnexpectedToken(self.lookahead)
    param = self.parsePattern()
    # 12.14.1: eval/arguments may not be catch variables in strict mode
    if self.strict and isRestrictedWord(param.name):
        self.tolerateError(Messages.StrictCatchVariable)
    self.expect(')')
    body = self.parseBlock()
    return node.finishCatchClause(param, body)
def parseTryStatement(self, node):
    """Parse try/catch/finally; at least one handler is required."""
    catch_clause = null
    finally_block = null
    self.expectKeyword('try')
    block = self.parseBlock()
    if self.matchKeyword('catch'):
        catch_clause = self.parseCatchClause()
    if self.matchKeyword('finally'):
        self.lex()  # consume 'finally'
        finally_block = self.parseBlock()
    if not catch_clause and not finally_block:
        # a bare 'try { }' is a syntax error
        self.throwError(Messages.NoCatchOrFinally)
    return node.finishTryStatement(block, catch_clause, finally_block)
# 12.15 The debugger statement
def parseDebuggerStatement(self, node):
    """Parse 'debugger;' (ECMA 12.15)."""
    self.expectKeyword('debugger')
    self.consumeSemicolon()
    return node.finishDebuggerStatement()
# 12 Statements
def parseStatement(self):
    """Parse a single Statement (ECMA 12).

    Dispatches on the lookahead token: block, punctuator shortcuts,
    statement keywords, then falls back to expression / labelled
    statements.
    """
    typ = self.lookahead['type']
    if (typ == Token.EOF):
        self.throwUnexpectedToken(self.lookahead)
    if (typ == Token.Punctuator and self.lookahead['value'] == '{'):
        return self.parseBlock()
    self.isAssignmentTarget = self.isBindingElement = true;
    node = Node();
    val = self.lookahead['value']
    if (typ == Token.Punctuator):
        if val == ';':
            return self.parseEmptyStatement(node);
        elif val == '(':
            return self.parseExpressionStatement(node);
    elif (typ == Token.Keyword):
        # keyword-introduced statements
        if val == 'break':
            return self.parseBreakStatement(node);
        elif val == 'continue':
            return self.parseContinueStatement(node);
        elif val == 'debugger':
            return self.parseDebuggerStatement(node);
        elif val == 'do':
            return self.parseDoWhileStatement(node);
        elif val == 'for':
            return self.parseForStatement(node);
        elif val == 'function':
            return self.parseFunctionDeclaration(node);
        elif val == 'if':
            return self.parseIfStatement(node);
        elif val == 'return':
            return self.parseReturnStatement(node);
        elif val == 'switch':
            return self.parseSwitchStatement(node);
        elif val == 'throw':
            return self.parseThrowStatement(node);
        elif val == 'try':
            return self.parseTryStatement(node);
        elif val == 'var':
            return self.parseVariableStatement(node);
        elif val == 'while':
            return self.parseWhileStatement(node);
        elif val == 'with':
            return self.parseWithStatement(node);
    expr = self.parseExpression();
    # 12.12 Labelled Statements: 'ident : statement'
    if ((expr.type == Syntax.Identifier) and self.match(':')):
        self.lex();
        key = '$' + expr.name
        if key in self.state['labelSet']:
            self.throwError(Messages.Redeclaration, 'Label', expr.name);
        # register the label for break/continue, then parse the body
        self.state['labelSet'][key] = true
        labeledBody = self.parseStatement()
        del self.state['labelSet'][key]
        return node.finishLabeledStatement(expr, labeledBody)
    self.consumeSemicolon();
    return node.finishExpressionStatement(expr)
# 13 Function Definition
def parseFunctionSourceElements(self):
    """Parse a function body block, handling the directive prologue.

    Leading string-literal expression statements are scanned for
    'use strict'; parser state (labels, iteration/switch flags) is
    saved, reset for the body, and restored afterwards.
    """
    body = []
    node = Node()
    firstRestricted = None
    self.expect('{')
    # directive prologue: leading string-literal statements
    while (self.startIndex < self.length):
        if (self.lookahead['type'] != Token.StringLiteral):
            break
        token = self.lookahead;
        statement = self.parseStatementListItem()
        body.append(statement)
        if (statement.expression.type != Syntax.Literal):
            # this is not directive
            break
        directive = self.source[token['start']+1 : token['end']-1]
        if (directive == 'use strict'):
            self.strict = true;
            if (firstRestricted):
                # an octal literal appeared before 'use strict'
                self.tolerateUnexpectedToken(firstRestricted, Messages.StrictOctalLiteral);
        else:
            if (not firstRestricted and token.get('octal')):
                firstRestricted = token;
    # save parser state, parse the remaining statements, then restore
    oldLabelSet = self.state['labelSet']
    oldInIteration = self.state['inIteration']
    oldInSwitch = self.state['inSwitch']
    oldInFunctionBody = self.state['inFunctionBody']
    oldParenthesisCount = self.state['parenthesizedCount']
    self.state['labelSet'] = {}
    self.state['inIteration'] = false
    self.state['inSwitch'] = false
    self.state['inFunctionBody'] = true
    self.state['parenthesizedCount'] = 0
    while (self.startIndex < self.length):
        if (self.match('}')):
            break
        body.append(self.parseStatementListItem())
    self.expect('}')
    self.state['labelSet'] = oldLabelSet;
    self.state['inIteration'] = oldInIteration;
    self.state['inSwitch'] = oldInSwitch;
    self.state['inFunctionBody'] = oldInFunctionBody;
    self.state['parenthesizedCount'] = oldParenthesisCount;
    return node.finishBlockStatement(body)
def validateParam(self, options, param, name):
    """Record strict-mode diagnostics for one formal parameter name.

    In strict mode, restricted words and duplicates are recorded in
    options['stricted']/['message']; otherwise the first offending
    token is remembered in options['firstRestricted'] in case the
    function body later turns out to be strict.
    """
    key = '$' + name
    if (self.strict):
        if (isRestrictedWord(name)):
            options['stricted'] = param;
            options['message'] = Messages.StrictParamName
        if key in options['paramSet']:
            options['stricted'] = param;
            options['message'] = Messages.StrictParamDupe;
    elif (not options['firstRestricted']):
        if (isRestrictedWord(name)):
            options['firstRestricted'] = param;
            options['message'] = Messages.StrictParamName;
        elif (isStrictModeReservedWord(name)):
            options['firstRestricted'] = param;
            options['message'] = Messages.StrictReservedWord;
        elif key in options['paramSet']:
            options['firstRestricted']= param
            options['message'] = Messages.StrictParamDupe;
    # remember the name to detect later duplicates
    options['paramSet'][key] = true
def parseParam(self, options):
    """Parse one formal parameter into ``options``.

    Returns false when this was the final parameter (rest parameter,
    or the next token is ')'), true when more parameters follow.
    """
    token = self.lookahead
    de = None  # default value expression, if any
    if (token['value'] == '...'):
        # rest parameter: always last, never has a default
        param = self.parseRestElement();
        self.validateParam(options, param.argument, param.argument.name);
        options['params'].append(param);
        options['defaults'].append(null);
        return false
    param = self.parsePatternWithDefault();
    self.validateParam(options, token, token['value']);
    if (param.type == Syntax.AssignmentPattern):
        # split 'name = default' into the pattern and its default
        de = param.right;
        param = param.left;
        options['defaultCount'] += 1
    options['params'].append(param);
    options['defaults'].append(de)
    return not self.match(')')
def parseParams(self, firstRestricted):
    """Parse a parenthesised formal-parameter list.

    Returns a dict with the parsed params/defaults plus any pending
    strict-mode diagnostics collected while validating names.
    """
    opts = {
        'params': [],
        'defaultCount': 0,
        'defaults': [],
        'firstRestricted': firstRestricted}
    self.expect('(')
    if not self.match(')'):
        opts['paramSet'] = {}
        while self.startIndex < self.length:
            # parseParam returns false after the final parameter
            if not self.parseParam(opts):
                break
            self.expect(',')
    self.expect(')')
    if opts['defaultCount'] == 0:
        # no defaults at all: normalise to an empty list
        opts['defaults'] = []
    return {
        'params': opts['params'],
        'defaults': opts['defaults'],
        'stricted': opts.get('stricted'),
        'firstRestricted': opts.get('firstRestricted'),
        'message': opts.get('message')}
def parseFunctionDeclaration(self, node, identifierIsOptional=None):
    """Parse a function declaration (ECMA 13).

    Strict-mode checks on the function name and parameters are
    deferred until after the body is parsed, since the body may
    contain a 'use strict' directive.
    """
    d = null
    params = []
    defaults = []
    message = None
    firstRestricted = None
    self.expectKeyword('function');
    if (identifierIsOptional or not self.match('(')):
        token = self.lookahead;
        d = self.parseVariableIdentifier();
        if (self.strict):
            if (isRestrictedWord(token['value'])):
                self.tolerateUnexpectedToken(token, Messages.StrictFunctionName);
        else:
            # remember potential violations in case the body is strict
            if (isRestrictedWord(token['value'])):
                firstRestricted = token;
                message = Messages.StrictFunctionName;
            elif (isStrictModeReservedWord(token['value'])):
                firstRestricted = token;
                message = Messages.StrictReservedWord;
    tmp = self.parseParams(firstRestricted);
    params = tmp['params']
    defaults = tmp['defaults']
    stricted = tmp['stricted']
    firstRestricted = tmp['firstRestricted']
    if (tmp.get('message')):
        message = tmp['message'];
    previousStrict = self.strict;
    body = self.parseFunctionSourceElements();
    # report deferred violations now that strictness is known
    if (self.strict and firstRestricted):
        self.throwUnexpectedToken(firstRestricted, message);
    if (self.strict and stricted):
        self.tolerateUnexpectedToken(stricted, message);
    self.strict = previousStrict;
    return node.finishFunctionDeclaration(d, params, defaults, body);
def parseFunctionExpression(self):
    """Parse a (possibly named) function expression.

    Mirrors parseFunctionDeclaration, but the identifier is optional
    and the node is created here.
    """
    id = null
    params = []
    defaults = []
    node = Node();
    firstRestricted = None
    message = None
    self.expectKeyword('function');
    if (not self.match('(')):
        token = self.lookahead;
        id = self.parseVariableIdentifier();
        if (self.strict):
            if (isRestrictedWord(token['value'])):
                self.tolerateUnexpectedToken(token, Messages.StrictFunctionName);
        else:
            # remember potential violations in case the body is strict
            if (isRestrictedWord(token['value'])):
                firstRestricted = token;
                message = Messages.StrictFunctionName;
            elif (isStrictModeReservedWord(token['value'])):
                firstRestricted = token;
                message = Messages.StrictReservedWord;
    tmp = self.parseParams(firstRestricted);
    params = tmp['params']
    defaults = tmp['defaults']
    stricted = tmp['stricted']
    firstRestricted = tmp['firstRestricted']
    if (tmp.get('message')):
        message = tmp['message']
    previousStrict = self.strict;
    body = self.parseFunctionSourceElements();
    # report deferred violations now that strictness is known
    if (self.strict and firstRestricted):
        self.throwUnexpectedToken(firstRestricted, message);
    if (self.strict and stricted):
        self.tolerateUnexpectedToken(stricted, message);
    self.strict = previousStrict;
    return node.finishFunctionExpression(id, params, defaults, body);
# todo Translate parse class functions!
def parseClassExpression(self):
    """Class expressions are not yet supported by this port."""
    raise NotImplementedError()
def parseClassDeclaration(self):
    """Class declarations are not yet supported by this port."""
    raise NotImplementedError()
# 14 Program
def parseScriptBody(self):
    """Parse the top-level statement list of a script.

    Like a function body, the script may start with a directive
    prologue ('use strict' etc.) of string-literal statements.
    """
    body = []
    firstRestricted = None
    # directive prologue
    while (self.startIndex < self.length):
        token = self.lookahead;
        if (token['type'] != Token.StringLiteral):
            break
        statement = self.parseStatementListItem();
        body.append(statement);
        if (statement.expression.type != Syntax.Literal):
            # this is not directive
            break
        directive = self.source[token['start'] + 1: token['end'] - 1]
        if (directive == 'use strict'):
            self.strict = true;
            if (firstRestricted):
                # an octal literal appeared before 'use strict'
                self.tolerateUnexpectedToken(firstRestricted, Messages.StrictOctalLiteral)
        else:
            if (not firstRestricted and token.get('octal')):
                firstRestricted = token;
    # remaining top-level statements
    while (self.startIndex < self.length):
        statement = self.parseStatementListItem();
        # istanbul ignore if
        if (statement is None):
            break
        body.append(statement);
    return body;
def parseProgram(self):
    """Parse a whole script and return the finished Program node."""
    self.peek()  # prime the first lookahead token
    program_node = Node()
    statements = self.parseScriptBody()
    return program_node.finishProgram(statements)
# DONE!!!
def parse(self, code, options=None):
    """Parse JavaScript source ``code`` and return the AST as a dict.

    ``options`` is accepted for API compatibility only; any truthy
    value raises NotImplementedError.  The default was changed from a
    shared mutable ``{}`` to ``None`` (same truthiness, no shared
    mutable default).
    """
    if options:
        raise NotImplementedError('Options not implemented! You can only use default settings.')
    self.clean()
    # append a terminator so the scanner never has to special-case EOF
    self.source = unicode(code) + ' \n ; //END' # I have to add it in order not to check for EOF every time
    self.index = 0
    self.lineNumber = 1 if len(self.source) > 0 else 0
    self.lineStart = 0
    self.startIndex = self.index
    self.startLineNumber = self.lineNumber
    self.startLineStart = self.lineStart
    self.length = len(self.source)
    self.lookahead = null
    # fresh per-parse state
    self.state = {
        'allowIn': true,
        'labelSet': {},
        'inFunctionBody': false,
        'inIteration': false,
        'inSwitch': false,
        'lastCommentStart': -1,
        'curlyStack': [],
        'parenthesizedCount': None}
    self.sourceType = 'script'
    self.strict = false
    program = self.parseProgram()
    return node_to_dict(program)
if __name__=='__main__':
import time
test_path = None
if test_path:
f = open(test_path, 'rb')
x = f.read()
f.close()
else:
x = 'var $ = "Hello!"'
p = PyJsParser()
t = time.time()
res = p.parse(x)
dt = time.time() - t+ 0.000000001
if test_path:
print len(res)
else:
pprint(res)
print
print 'Parsed everyting in', round(dt,5), 'seconds.'
print 'Thats %d characters per second' % int(len(x)/dt)
| felipenaselva/felipe.repository | script.module.streamhublive/resources/modules/js2py/translators/pyjsparser.py | Python | gpl-2.0 | 103,330 | [
"VisIt"
] | c953a1ca2462327af61be0917c4fad8e05351a9d72a27872a72db6f04ca67e9a |
#!/usr/bin/python
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
""" Module: itim
============
"""
from __future__ import print_function
import platform
from multiprocessing import Process, Queue
import numpy as np
try:
from __builtin__ import zip as builtin_zip
except:
from builtins import zip as builtin_zip
from scipy.spatial import cKDTree
from . import messages
from . import utilities
from .surface import SurfaceFlatInterface as Surface
from .sanity_check import SanityCheck
from .interface import Interface
from .patches import patchTrajectory, patchOpenMM, patchMDTRAJ
class ITIM(Interface):
""" Identifies interfacial molecules at macroscopically flat interfaces.
*(Pártay, L. B.; Hantal, Gy.; Jedlovszky, P.; Vincze, Á.; Horvai, G., \
J. Comp. Chem. 29, 945, 2008)*
:param Object universe: The MDAnalysis_ Universe, MDTraj_ trajectory
or OpenMM_ Simulation objects.
:param Object group: An AtomGroup, or an array-like object with
the indices of the atoms in the group. Will
identify the interfacial molecules from this
group
:param float alpha: The probe sphere radius
:param str normal: The macroscopic interface normal direction
'x','y', 'z' or 'guess' (default)
:param bool molecular: Switches between search of interfacial
molecules / atoms (default: True)
:param int max_layers: The number of layers to be identified
:param dict radii_dict: Dictionary with the atomic radii of the
elements in the group. If None is supplied,
the default one (from GROMOS 43a1) will be
used.
:param float cluster_cut: Cutoff used for neighbors or density-based
cluster search (default: None disables the
cluster analysis)
:param float cluster_threshold_density: Number density threshold for
the density-based cluster search. 'auto'
determines the threshold automatically.
Default: None uses simple neighbors cluster
search, if cluster_cut is not None
:param Object extra_cluster_groups: Additional groups, to allow for
mixed interfaces
:param bool info: Print additional info
:param bool centered: Center the :py:obj:`group`
:param bool warnings: Print warnings
:param float mesh: The grid spacing used for the testlines
(default 0.4 Angstrom)
:param bool autoassign: If true (default) detect the interface
every time a new frame is selected.
Example:
>>> import MDAnalysis as mda
>>> import numpy as np
>>> import pytim
>>> from pytim.datafiles import *
>>>
>>> u = mda.Universe(WATER_GRO)
>>> oxygens = u.select_atoms("name OW")
>>>
>>> interface = pytim.ITIM(u, alpha=1.5, max_layers=4,molecular=True)
>>> # atoms in the layers can be accessed either through
>>> # the layers array:
>>> print (interface.layers)
[[<AtomGroup with 786 atoms> <AtomGroup with 681 atoms>
<AtomGroup with 663 atoms> <AtomGroup with 651 atoms>]
[<AtomGroup with 786 atoms> <AtomGroup with 702 atoms>
<AtomGroup with 666 atoms> <AtomGroup with 636 atoms>]]
>>> interface.layers[0,0] # upper side, first layer
<AtomGroup with 786 atoms>
>>> interface.layers[1,2] # lower side, third layer
<AtomGroup with 666 atoms>
>>> # or as a whole AtomGroup. This can include all atoms in all layers
>>> interface.atoms
<AtomGroup with 5571 atoms>
>>> selection = interface.atoms.sides == 0
>>> interface.atoms[ selection ] # all atoms in the upper side layer
<AtomGroup with 2781 atoms>
>>> selection = np.logical_and(interface.atoms.layers == 2 , selection)
>>> interface.atoms[ selection ] # upper side, second layer
<AtomGroup with 681 atoms>
>>> # the whole system can be quickly saved to a pdb file
>>> # including the layer information, written in the beta field
>>> # using:
>>> interface.writepdb('system.pdb',centered=True)
>>> # of course, the native interface of MDAnalysis can be used to
>>> # write pdb files, but the centering options are not available.
>>> # Writing to other formats that do not support the beta factor
>>> # will lose the information on the layers.
>>> interface.atoms.write('only_layers.pdb')
>>> # In some cases it might be necessary to compute two interfaces.
>>> # This could be done in the following way:
>>> import MDAnalysis as mda
>>> import pytim
>>> from pytim.datafiles import WATER_GRO, WATER_XTC
>>> u = mda.Universe(WATER_GRO,WATER_XTC)
>>> u2 = mda.Universe(WATER_GRO,WATER_XTC)
>>> inter = pytim.ITIM(u,group=u.select_atoms('resname SOL'))
>>> inter2 = pytim.ITIM(u2,group=u2.select_atoms('resname SOL'))
>>> for ts in u.trajectory[::50]:
... ts2 = u2.trajectory[ts.frame]
>>> # pytim can be used also on top of mdtraj (MDAnalysis must be present,though)
>>> import mdtraj
>>> import pytim
>>> from pytim.datafiles import WATER_GRO, WATER_XTC
>>> t = mdtraj.load_xtc(WATER_XTC,top=WATER_GRO)
>>> inter = pytim.ITIM(t)
.. _MDAnalysis: http://www.mdanalysis.org/
.. _MDTraj: http://www.mdtraj.org/
.. _OpenMM: http://www.openmm.org/
"""
@property
def layers(self):
    """Access the layers as numpy arrays of AtomGroups.
    The object can be sliced as usual with numpy arrays, so, for example:
    >>> import MDAnalysis as mda
    >>> import pytim
    >>> from pytim.datafiles import *
    >>>
    >>> u = mda.Universe(WATER_GRO)
    >>> oxygens = u.select_atoms("name OW")
    >>>
    >>> interface = pytim.ITIM(u, alpha=1.5, max_layers=4,molecular=True)
    >>> print(interface.layers[0,:])  # upper side (0), all layers
    [<AtomGroup with 786 atoms> <AtomGroup with 681 atoms>
     <AtomGroup with 663 atoms> <AtomGroup with 651 atoms>]
    >>> repr(interface.layers[1,0])  # lower side (1), first layer (0)
    '<AtomGroup with 786 atoms>'
    >>> print(interface.layers[:,0:3]) # 1st - 3rd layer (0:3), on both sides
    [[<AtomGroup with 786 atoms> <AtomGroup with 681 atoms>
      <AtomGroup with 663 atoms>]
     [<AtomGroup with 786 atoms> <AtomGroup with 702 atoms>
      <AtomGroup with 666 atoms>]]
    >>> print(interface.layers[1,0:4:2]) # side 1, layers 1-4 & stride 2 (0:4:2)
    [<AtomGroup with 786 atoms> <AtomGroup with 666 atoms>]
    """
    # (2, max_layers) array of AtomGroups filled by _assign_layers()
    return self._layers
def __init__(self,
             universe,
             group=None,
             alpha=1.5,
             normal='guess',
             molecular=True,
             max_layers=1,
             radii_dict=None,
             cluster_cut=None,
             cluster_threshold_density=None,
             extra_cluster_groups=None,
             info=False,
             centered=False,
             warnings=False,
             mesh=0.4,
             autoassign=True,
             **kargs):
    # See the class docstring for the meaning of each parameter.
    self.autoassign = autoassign
    self.biggest_cluster_only = True  # necessary for ITIM
    self.symmetry = 'planar'
    self.do_center = centered
    self.system = platform.system()
    # SanityCheck validates and normalises all user-supplied options
    sanity = SanityCheck(self, warnings=warnings)
    sanity.assign_universe(universe, group)
    sanity.assign_alpha(alpha)
    sanity.assign_mesh(mesh)
    self.max_layers = max_layers
    # (2, max_layers) array of AtomGroups: upper/lower side x layer
    self._layers = np.empty(
        [2, max_layers], dtype=self.universe.atoms[0].__class__)
    self._surfaces = np.empty(max_layers, dtype=type(Surface))
    self.info = info
    self.normal = None
    self.PDB = {}
    self.molecular = molecular
    sanity.assign_cluster_params(cluster_cut,
                                 cluster_threshold_density, extra_cluster_groups)
    sanity.assign_normal(normal)
    sanity.assign_radii(radii_dict=radii_dict)
    self.grid = None
    self.use_threads = False
    # hook the trajectory so layers are recomputed on frame changes,
    # then detect the interface for the current frame
    patchTrajectory(self.universe.trajectory, self)
    self._assign_layers()
def _create_mesh(self):
    """ Mesh assignment method
        Based on a target value, determine a mesh size for the testlines
        that is compatible with the simulation box.
        Create the grid and initialize a cKDTree object with it to
        facilitate fast searching of the gridpoints touched by molecules.
    """
    box = utilities.get_box(self.universe, self.normal)
    # choose counts/spacings commensurate with the box edges
    n, d = utilities.compute_compatible_mesh_params(self.target_mesh, box)
    self.mesh_nx = n[0]
    self.mesh_ny = n[1]
    self.mesh_dx = d[0]
    self.mesh_dy = d[1]
    _x = np.linspace(0, box[0], num=int(self.mesh_nx), endpoint=False)
    _y = np.linspace(0, box[1], num=int(self.mesh_ny), endpoint=False)
    _X, _Y = np.meshgrid(_x, _y)
    self.meshpoints = np.array([_X.ravel(), _Y.ravel()]).T
    # periodic KD-tree over the in-plane box for fast radius queries
    self.meshtree = cKDTree(self.meshpoints, boxsize=box[:2])
def _touched_lines(self, atom, _x, _y, _z, _radius):
    """Return indices of mesh testlines within reach of ``atom``.

    A testline is 'touched' when its (x, y) gridpoint lies within the
    atom's radius plus the probe-sphere radius alpha.
    """
    center = [_x[atom], _y[atom]]
    reach = _radius[atom] + self.alpha
    return self.meshtree.query_ball_point(center, reach)
def _append_layers(self, uplow, layer, layers):
    """Collect the atoms tagged for ``layer`` on side ``uplow`` into an
    AtomGroup and append it to ``layers``.

    When ``self.molecular`` is set, the selection is extended to whole
    residues, and the extra atoms are tagged in ``self._seen`` too.
    """
    inlayer_indices = np.flatnonzero(self._seen[uplow] == layer + 1)
    inlayer_group = self.cluster_group[inlayer_indices]
    if self.molecular is True:
        # we first select the (unique) residues corresponding
        # to inlayer_group, and then we create group of the
        # atoms belonging to them, with
        # inlayer_group.residues.atoms
        inlayer_group = inlayer_group.residues.atoms
        # now we need the indices within the cluster_group,
        # of the atoms in the molecular layer group;
        # NOTE that from MDAnalysis 0.16, .ids runs from 1->N
        # (was 0->N-1 in 0.15), we use now .indices
        indices = np.flatnonzero(
            np.in1d(self.cluster_group.atoms.indices,
                    inlayer_group.atoms.indices))
        # and update the tagged, sorted atoms
        self._seen[uplow][indices] = layer + 1
    # one of the two layers (upper,lower) or both are empty
    if not inlayer_group:
        raise Exception(messages.EMPTY_LAYER)
    layers.append(inlayer_group)
def _assign_one_side(self,
                     uplow,
                     sorted_atoms,
                     _x,
                     _y,
                     _z,
                     _radius,
                     queue=None):
    """Identify all layers on one side (``uplow``: 0 upper, 1 lower).

    ``sorted_atoms`` holds positional indices into the cluster group,
    sorted so that atoms closest to this side's surface come first.
    Results are returned directly, or put on ``queue`` when running
    in a separate process.
    """
    layers = []
    for layer in range(0, self.max_layers):
        # this mask tells which lines have been touched.
        mask = self.mask[uplow][layer]
        # atom here goes to 0 to #sorted_atoms, it is not a MDAnalysis
        # index/atom
        for atom in sorted_atoms:
            # skip atoms already claimed by a previous layer
            if self._seen[uplow][atom] != 0:
                continue
            touched_lines = self._touched_lines(atom, _x, _y, _z, _radius)
            _submask = mask[touched_lines]
            if (len(_submask[_submask == 0]) == 0):
                # no new contact, let's move to the next atom
                continue
            # let's mark now: 1) the touched lines
            mask[touched_lines] = 1
            # 2) the sorted atoms.
            self._seen[uplow][atom] = layer + 1
            # 3) if all lines have been touched, create a group that
            # includes all atoms in this layer
            if np.sum(mask) == len(mask):
                self._append_layers(uplow, layer, layers)
                break
    if (queue is None):
        return layers
    else:
        queue.put(layers)
def _prepare_layers_assignment(self):
self._create_mesh()
size = (2, int(self.max_layers), int(self.mesh_nx) * int(self.mesh_ny))
self.mask = np.zeros(size, dtype=int)
self.prepare_box()
    def _prelabel_groups(self):
        """Assign initial beta labels before the layer search starts."""
        # first we label all atoms in group to be in the gas phase
        self.label_group(self.analysis_group.atoms, beta=0.5)
        # then all atoms in the largest group are labelled as liquid-like
        self.label_group(self.cluster_group.atoms, beta=0.0)
    def _assign_layers(self):
        """ Determine the ITIM layers.
        Note that the multiproc option is mainly for debugging purposes:
        >>> import MDAnalysis as mda
        >>> import pytim
        >>> u = mda.Universe(pytim.datafiles.WATER_GRO)
        >>> inter = pytim.ITIM(u,multiproc=True)
        >>> test1 = len(inter.layers[0,0])
        >>> inter = pytim.ITIM(u,multiproc=False)
        >>> test2 = len(inter.layers[0,0])
        >>> test1==test2
        True
        """
        up, low = 0, 1
        self.reset_labels()
        # build the mesh of test lines and zero the per-layer masks
        self._prepare_layers_assignment()
        # groups have been checked already in _sanity_checks()
        self._define_cluster_group()
        # we always (internally) center in ITIM
        self.center(planar_to_origin=True)
        self._prelabel_groups()
        _radius = self.cluster_group.radii
        size = len(self.cluster_group.positions)
        # _seen[side][atom] holds the 1-based layer tag (0 = unassigned)
        self._seen = [
            np.zeros(size, dtype=np.int8),
            np.zeros(size, dtype=np.int8)
        ]
        _x = utilities.get_x(self.cluster_group, self.normal)
        _y = utilities.get_y(self.cluster_group, self.normal)
        _z = utilities.get_z(self.cluster_group, self.normal)
        # sort by z shifted outwards by the radius, so the outermost
        # atoms come first/last depending on the side
        sort = np.argsort(_z + _radius * np.sign(_z))
        # NOTE: np.argsort returns the sorted *indices*
        if self.multiproc and ('win' not in self.system.lower()):
            # so far, it justs exploit a simple scheme splitting
            # the calculation between the two sides. Would it be
            # possible to implement easily 2d domain decomposition?
            proc, queue = [None, None], [Queue(), Queue()]
            proc[up] = Process(
                target=self._assign_one_side,
                args=(up, sort[::-1], _x, _y, _z, _radius, queue[up]))
            proc[low] = Process(
                target=self._assign_one_side,
                args=(low, sort[::], _x, _y, _z, _radius, queue[low]))
            for p in proc:
                p.start()
            for uplow in [up, low]:
                for index, group in enumerate(queue[uplow].get()):
                    # cannot use self._layers[uplow][index] = group, otherwise
                    # info about universe is lost (do not know why yet)
                    # must use self._layers[uplow][index] =
                    # self.universe.atoms[group.indices]
                    self._layers[uplow][index] = self.universe.atoms[group.indices]
            for p in proc:
                p.join()
            for q in queue:
                q.close()
        else:
            # serial path: upper side scans top-down, lower bottom-up
            for index, group in enumerate(self._assign_one_side(up, sort[::-1], _x, _y, _z, _radius)):
                self._layers[up][index] = group
            for index, group in enumerate(self._assign_one_side(low, sort[::], _x, _y, _z, _radius)):
                self._layers[low][index] = group
        self.label_planar_sides()
        for nlayer, layer in enumerate(self._layers[
                0]):  # TODO should this be moved out of assign_layers?
            self._surfaces[nlayer] = Surface(self, options={'layer': nlayer})
        if self.do_center is False:  # NOTE: do_center requires centering in
            # the middle of the box.
            # ITIM always centers internally in the
            # origin along the normal.
            self.universe.atoms.positions = self.original_positions
        else:
            self._shift_positions_to_middle()
#
| Marcello-Sega/pytim | pytim/itim.py | Python | gpl-3.0 | 16,830 | [
"GROMOS",
"MDAnalysis",
"MDTraj",
"OpenMM"
] | ade4c26e25460b15dfa4088bb5f3f792f56283ef972076a63970accec9743bc4 |
import sys, config
from galaxy.web import security
import galaxy.webapps.demo_sequencer.registry
class UniverseApplication( object ):
    """Encapsulates the state of a Universe application"""
    def __init__( self, **kwargs ):
        # NOTE: Python 2 print statement; dump sys.path for startup debugging
        print >> sys.stderr, "python path is: " + ", ".join( sys.path )
        self.name = "demo_sequencer"
        # Read config file and check for errors
        self.config = config.Configuration( **kwargs )
        self.config.check()
        # configure logging from the application config
        config.configure_logging( self.config )
        # Set up sequencer actions registry
        self.sequencer_actions_registry = galaxy.webapps.demo_sequencer.registry.Registry( self.config.root, self.config.sequencer_actions_config )
        # Security helper
        self.security = security.SecurityHelper( id_secret=self.config.id_secret )
    def shutdown( self ):
        # no resources to release on shutdown
        pass
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/webapps/demo_sequencer/app.py | Python | gpl-3.0 | 858 | [
"Galaxy"
] | e6598fd1bc5ffef2200e8c60547808711a321549320ccdcdd2df0c9bdbf0e0bd |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2002 Gary Shao
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
log = logging.getLogger(".graphicstyle")
#-------------------------------------------------------------------------
#
# Line style
#
#-------------------------------------------------------------------------
# Line style constants; they index line_style_names and _DASH_ARRAY below.
SOLID = 0
DASHED = 1
DOTTED = 2
# Notes about adding new line styles:
# 1) the first style is used when an invalid style is specified by the report
# 2) the style names are used by the ODF generator and should be unique
# 3) the line style constants above need to be imported in the
# gen.plug.docgen.__init__ file so they can be used in a report add-on
line_style_names = ('solid', 'dashed', 'dotted')
# Dash patterns as [on, off] segment lengths, one per style constant.
_DASH_ARRAY = [ [1, 0], [2, 4], [1, 2] ]
def get_line_style_by_name(style_name):
    """Return the dash pattern associated with *style_name*.

    Unknown names fall back to the first (solid) pattern.
    """
    try:
        which = line_style_names.index(style_name)
    except ValueError:
        which = 0
    return _DASH_ARRAY[which]
#------------------------------------------------------------------------
#
# GraphicsStyle
#
#------------------------------------------------------------------------
class GraphicsStyle(object):
    """
    Hold the visual properties of a graphics object: line width and
    style, line and fill colors, shadow settings, and the name of the
    paragraph style used for contained text.
    """
    def __init__(self, obj=None):
        """
        Initialize with default values or, when a source object *obj*
        is given, copy every setting from it.
        """
        if obj:
            for attr in ('para_name', 'shadow', 'shadow_space', 'color',
                         'fill_color', 'lwidth', 'lstyle'):
                setattr(self, attr, getattr(obj, attr))
        else:
            self.para_name = ""
            self.shadow = 0
            self.shadow_space = 0.2
            self.lwidth = 0.5
            self.color = (0, 0, 0)
            self.fill_color = (255, 255, 255)
            self.lstyle = SOLID
    def set_line_width(self, val):
        """Set the line width."""
        self.lwidth = val
    def get_line_width(self):
        """Return the line width."""
        return self.lwidth
    def get_line_style(self):
        """Return the line style constant."""
        return self.lstyle
    def set_line_style(self, val):
        """Set the line style constant."""
        self.lstyle = val
    def get_dash_style(self, val=None):
        """Return the dash pattern for *val* (the current line style when
        None); out-of-range values fall back to the first pattern."""
        idx = self.lstyle if val is None else val
        if 0 <= idx < len(_DASH_ARRAY):
            return _DASH_ARRAY[idx]
        return _DASH_ARRAY[0]
    def get_dash_style_name(self, val=None):
        """Return the dash style name for *val* (the current line style
        when None); out-of-range values fall back to the first name."""
        idx = self.lstyle if val is None else val
        if 0 <= idx < len(line_style_names):
            return line_style_names[idx]
        return line_style_names[0]
    def set_paragraph_style(self, val):
        """Set the name of the associated paragraph style."""
        self.para_name = val
    def set_shadow(self, val, space=0.2):
        """Set the shadow flag and its displacement."""
        self.shadow = val
        self.shadow_space = space
    def get_shadow_space(self):
        """Return the shadow displacement."""
        return self.shadow_space
    def set_color(self, val):
        """Set the line color (defaults are (r, g, b) tuples)."""
        self.color = val
    def set_fill_color(self, val):
        """Set the fill color (defaults are (r, g, b) tuples)."""
        self.fill_color = val
    def get_paragraph_style(self):
        """Return the name of the associated paragraph style."""
        return self.para_name
    def get_shadow(self):
        """Return the shadow flag."""
        return self.shadow
    def get_color(self):
        """Return the line color."""
        return self.color
    def get_fill_color(self):
        """Return the fill color."""
        return self.fill_color
| Forage/Gramps | gramps/gen/plug/docgen/graphicstyle.py | Python | gpl-2.0 | 4,929 | [
"Brian"
] | 7fb26ded477bd8f49eb8f9093601070cd9e580ad332bc78f79c3350b8219999d |
r"""
Ordination results format (:mod:`skbio.io.format.ordination`)
=============================================================
.. currentmodule:: skbio.io.format.ordination
The ordination results file format (``ordination``) stores the results of an
ordination method in a human-readable, text-based format. The format supports
storing the results of various ordination methods available in scikit-bio,
including (but not necessarily limited to) PCoA, CA, RDA, and CCA.
Format Support
--------------
**Has Sniffer: Yes**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |Yes |:mod:`skbio.stats.ordination.OrdinationResults` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
The format is text-based, consisting of six attributes that describe the
ordination results:
- ``Eigvals``: 1-D
- ``Proportion explained``: 1-D
- ``Species``: 2-D
- ``Site``: 2-D
- ``Biplot``: 2-D
- ``Site constraints``: 2-D
The attributes in the file *must* be in this order.
Each attribute is defined in its own section of the file, where sections are
separated by a blank (or whitespace-only) line. Each attribute begins with a
header line, which contains the attribute's name (as listed above), followed by
a tab character, followed by one or more tab-separated dimensions (integers)
that describe the shape of the attribute's data.
The attribute's data follows its header line, and is stored in tab-separated
format. ``Species``, ``Site``, and ``Site constraints`` store species and site
IDs, respectively, as the first column, followed by the 2-D data array.
An example of this file format might look like::
Eigvals<tab>4
0.36<tab>0.18<tab>0.07<tab>0.08
Proportion explained<tab>4
0.46<tab>0.23<tab>0.10<tab>0.10
Species<tab>9<tab>4
Species0<tab>0.11<tab>0.28<tab>-0.20<tab>-0.00
Species1<tab>0.14<tab>0.30<tab>0.39<tab>-0.14
Species2<tab>-1.01<tab>0.09<tab>-0.19<tab>-0.10
Species3<tab>-1.03<tab>0.10<tab>0.22<tab>0.22
Species4<tab>1.05<tab>0.53<tab>-0.43<tab>0.22
Species5<tab>0.99<tab>0.57<tab>0.67<tab>-0.38
Species6<tab>0.25<tab>-0.17<tab>-0.20<tab>0.43
Species7<tab>0.14<tab>-0.85<tab>-0.01<tab>0.05
Species8<tab>0.41<tab>-0.70<tab>0.21<tab>-0.69
Site<tab>10<tab>4
Site0<tab>0.71<tab>-3.08<tab>0.21<tab>-1.24
Site1<tab>0.58<tab>-3.00<tab>-0.94<tab>2.69
Site2<tab>0.76<tab>-3.15<tab>2.13<tab>-3.11
Site3<tab>1.11<tab>1.07<tab>-1.87<tab>0.66
Site4<tab>-0.97<tab>-0.06<tab>-0.69<tab>-0.61
Site5<tab>1.04<tab>0.45<tab>-0.63<tab>0.28
Site6<tab>-0.95<tab>-0.08<tab>0.13<tab>-0.42
Site7<tab>0.94<tab>-0.10<tab>0.52<tab>-0.00
Site8<tab>-1.14<tab>0.49<tab>0.47<tab>1.17
Site9<tab>1.03<tab>1.03<tab>2.74<tab>-1.28
Biplot<tab>3<tab>3
-0.16<tab>0.63<tab>0.76
-0.99<tab>0.06<tab>-0.04
0.18<tab>-0.97<tab>0.03
Site constraints<tab>10<tab>4
Site0<tab>0.69<tab>-3.08<tab>-0.32<tab>-1.24
Site1<tab>0.66<tab>-3.06<tab>0.23<tab>2.69
Site2<tab>0.63<tab>-3.04<tab>0.78<tab>-3.11
Site3<tab>1.10<tab>0.50<tab>-1.55<tab>0.66
Site4<tab>-0.97<tab>0.06<tab>-1.12<tab>-0.61
Site5<tab>1.05<tab>0.53<tab>-0.43<tab>0.28
Site6<tab>-1.02<tab>0.10<tab>-0.00<tab>-0.42
Site7<tab>0.99<tab>0.57<tab>0.67<tab>-0.00
Site8<tab>-1.08<tab>0.13<tab>1.11<tab>1.17
Site9<tab>0.94<tab>0.61<tab>1.79<tab>-1.28
If a given result attribute is not present (e.g. ``Biplot``), it should still
be defined and declare its dimensions as 0. For example::
Biplot<tab>0<tab>0
All attributes are optional except for ``Eigvals``.
Examples
--------
Assume we have the following tab-delimited text file storing the
ordination results in ``ordination`` format::
Eigvals<tab>4
0.36<tab>0.18<tab>0.07<tab>0.08
Proportion explained<tab>4
0.46<tab>0.23<tab>0.10<tab>0.10
Species<tab>9<tab>4
Species0<tab>0.11<tab>0.28<tab>-0.20<tab>-0.00
Species1<tab>0.14<tab>0.30<tab>0.39<tab>-0.14
Species2<tab>-1.01<tab>0.09<tab>-0.19<tab>-0.10
Species3<tab>-1.03<tab>0.10<tab>0.22<tab>0.22
Species4<tab>1.05<tab>0.53<tab>-0.43<tab>0.22
Species5<tab>0.99<tab>0.57<tab>0.67<tab>-0.38
Species6<tab>0.25<tab>-0.17<tab>-0.20<tab>0.43
Species7<tab>0.14<tab>-0.85<tab>-0.01<tab>0.05
Species8<tab>0.41<tab>-0.70<tab>0.21<tab>-0.69
Site<tab>10<tab>4
Site0<tab>0.71<tab>-3.08<tab>0.21<tab>-1.24
Site1<tab>0.58<tab>-3.00<tab>-0.94<tab>2.69
Site2<tab>0.76<tab>-3.15<tab>2.13<tab>-3.11
Site3<tab>1.11<tab>1.07<tab>-1.87<tab>0.66
Site4<tab>-0.97<tab>-0.06<tab>-0.69<tab>-0.61
Site5<tab>1.04<tab>0.45<tab>-0.63<tab>0.28
Site6<tab>-0.95<tab>-0.08<tab>0.13<tab>-0.42
Site7<tab>0.94<tab>-0.10<tab>0.52<tab>-0.00
Site8<tab>-1.14<tab>0.49<tab>0.47<tab>1.17
Site9<tab>1.03<tab>1.03<tab>2.74<tab>-1.28
Biplot<tab>0<tab>0
Site constraints<tab>0<tab>0
Load the ordination results from the file:
>>> from io import StringIO
>>> from skbio import OrdinationResults
>>> or_f = StringIO(
... "Eigvals\t4\n"
... "0.36\t0.18\t0.07\t0.08\n"
... "\n"
... "Proportion explained\t4\n"
... "0.46\t0.23\t0.10\t0.10\n"
... "\n"
... "Species\t9\t4\n"
... "Species0\t0.11\t0.28\t-0.20\t-0.00\n"
... "Species1\t0.14\t0.30\t0.39\t-0.14\n"
... "Species2\t-1.01\t0.09\t-0.19\t-0.10\n"
... "Species3\t-1.03\t0.10\t0.22\t0.22\n"
... "Species4\t1.05\t0.53\t-0.43\t0.22\n"
... "Species5\t0.99\t0.57\t0.67\t-0.38\n"
... "Species6\t0.25\t-0.17\t-0.20\t0.43\n"
... "Species7\t0.14\t-0.85\t-0.01\t0.05\n"
... "Species8\t0.41\t-0.70\t0.21\t-0.69\n"
... "\n"
... "Site\t10\t4\n"
... "Site0\t0.71\t-3.08\t0.21\t-1.24\n"
... "Site1\t0.58\t-3.00\t-0.94\t2.69\n"
... "Site2\t0.76\t-3.15\t2.13\t-3.11\n"
... "Site3\t1.11\t1.07\t-1.87\t0.66\n"
... "Site4\t-0.97\t-0.06\t-0.69\t-0.61\n"
... "Site5\t1.04\t0.45\t-0.63\t0.28\n"
... "Site6\t-0.95\t-0.08\t0.13\t-0.42\n"
... "Site7\t0.94\t-0.10\t0.52\t-0.00\n"
... "Site8\t-1.14\t0.49\t0.47\t1.17\n"
... "Site9\t1.03\t1.03\t2.74\t-1.28\n"
... "\n"
... "Biplot\t0\t0\n"
... "\n"
... "Site constraints\t0\t0\n")
>>> ord_res = OrdinationResults.read(or_f)
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import pandas as pd
from skbio._base import OrdinationResults
from skbio.io import create_format, OrdinationFormatError
ordination = create_format('ordination')
@ordination.sniffer()
def _ordination_sniffer(fh):
    """Guess whether *fh* holds an ordination file.

    A positive sniff requires, from the very beginning of the file: an
    eigvals header (minimally parsed), one more line (contents
    ignored), a whitespace-only line, and a proportion-explained
    header.
    """
    try:
        _parse_header(fh, 'Eigvals', 1)
        if next(fh, None) is not None:
            _check_empty_line(fh)
            _parse_header(fh, 'Proportion explained', 1)
            return True, {}
    except OrdinationFormatError:
        pass
    return False, {}
@ordination.reader(OrdinationResults)
def _ordination_to_ordination_results(fh):
    """Parse the fixed-order sections of an ordination file and build an
    OrdinationResults object."""
    eigvals = _parse_vector_section(fh, 'Eigvals')
    if eigvals is None:
        raise OrdinationFormatError("At least one eigval must be present.")
    _check_empty_line(fh)
    prop_expl = _parse_vector_section(fh, 'Proportion explained')
    # every present section must supply one value per eigenvalue
    _check_length_against_eigvals(prop_expl, eigvals,
                                  'proportion explained values')
    _check_empty_line(fh)
    species = _parse_array_section(fh, 'Species')
    _check_length_against_eigvals(species, eigvals,
                                  'coordinates per species')
    _check_empty_line(fh)
    site = _parse_array_section(fh, 'Site')
    _check_length_against_eigvals(site, eigvals,
                                  'coordinates per site')
    _check_empty_line(fh)
    # biplot does not have ids to parse (the other arrays do)
    biplot = _parse_array_section(fh, 'Biplot', has_ids=False)
    _check_empty_line(fh)
    cons = _parse_array_section(fh, 'Site constraints')
    if cons is not None and site is not None:
        # constraints must refer to the same sites, in the same order
        if not np.array_equal(cons.index, site.index):
            raise OrdinationFormatError(
                "Site constraints ids and site ids must be equal: %s != %s" %
                (cons.index, site.index))
    return OrdinationResults(
        short_method_name='', long_method_name='', eigvals=eigvals,
        features=species, samples=site, biplot_scores=biplot,
        sample_constraints=cons, proportion_explained=prop_expl)
def _parse_header(fh, header_id, num_dimensions):
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for %s header." % header_id)
header = line.strip().split('\t')
# +1 for the header ID
if len(header) != num_dimensions + 1 or header[0] != header_id:
raise OrdinationFormatError("%s header not found." % header_id)
return header
def _check_empty_line(fh):
"""Check that the next line in `fh` is empty or whitespace-only."""
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for blank line separating "
"sections.")
if line.strip():
raise OrdinationFormatError("Expected an empty line.")
def _check_length_against_eigvals(data, eigvals, label):
if data is not None:
num_vals = data.shape[-1]
num_eigvals = eigvals.shape[-1]
if num_vals != num_eigvals:
raise OrdinationFormatError(
"There should be as many %s as eigvals: %d != %d" %
(label, num_vals, num_eigvals))
def _parse_vector_section(fh, header_id):
    """Parse a 1-D section (Eigvals / Proportion explained) from *fh*.

    Returns a float pd.Series, or None when the declared length is 0
    (the ordination method did not produce the vector).  Raises
    OrdinationFormatError on EOF or on a length mismatch.
    """
    header = _parse_header(fh, header_id, 1)
    # the header declares how many values follow
    num_vals = int(header[1])
    if num_vals == 0:
        return None
    line = next(fh, None)
    if line is None:
        raise OrdinationFormatError(
            "Reached end of file while looking for line containing values "
            "for %s section." % header_id)
    vals = pd.Series(np.asarray(line.strip().split('\t'),
                                dtype=np.float64))
    if len(vals) != num_vals:
        raise OrdinationFormatError(
            "Expected %d values in %s section, but found %d." %
            (num_vals, header_id, len(vals)))
    return vals
def _parse_array_section(fh, header_id, has_ids=True):
    """Parse an array section of `fh` identified by `header_id`.

    Returns a pd.DataFrame (indexed by the parsed row ids when *has_ids*
    is true), or None when the declared shape is 0 x 0.  Raises
    OrdinationFormatError on EOF, a half-zero shape, or a row with the
    wrong number of values.
    """
    # Parse the array header
    header = _parse_header(fh, header_id, 2)
    # Parse the dimensions of the array
    rows = int(header[1])
    cols = int(header[2])
    ids = None
    if rows == 0 and cols == 0:
        # The ordination method didn't generate the array data for 'header', so
        # set it to None
        data = None
    elif rows == 0 or cols == 0:
        # Both dimensions should be 0 or none of them are zero
        raise OrdinationFormatError("One dimension of %s is 0: %d x %d" %
                                    (header_id, rows, cols))
    else:
        # Parse the data
        data = np.empty((rows, cols), dtype=np.float64)
        if has_ids:
            ids = []
        for i in range(rows):
            # Parse the next row of data
            line = next(fh, None)
            if line is None:
                raise OrdinationFormatError(
                    "Reached end of file while looking for row %d in %s "
                    "section." % (i + 1, header_id))
            vals = line.strip().split('\t')
            if has_ids:
                # the first column holds the row id, the rest the values
                ids.append(vals[0])
                vals = vals[1:]
            if len(vals) != cols:
                raise OrdinationFormatError(
                    "Expected %d values, but found %d in row %d." %
                    (cols, len(vals), i + 1))
            data[i, :] = np.asarray(vals, dtype=np.float64)
        data = pd.DataFrame(data, index=ids)
    return data
@ordination.writer(OrdinationResults)
def _ordination_results_to_ordination(obj, fh):
    """Write *obj* to *fh* in ordination format, emitting the sections
    in the fixed order the reader expects."""
    _write_vector_section(fh, 'Eigvals', obj.eigvals)
    _write_vector_section(fh, 'Proportion explained', obj.proportion_explained)
    _write_array_section(fh, 'Species', obj.features)
    _write_array_section(fh, 'Site', obj.samples)
    _write_array_section(fh, 'Biplot', obj.biplot_scores, has_ids=False)
    # last section: no trailing blank separator line
    _write_array_section(fh, 'Site constraints', obj.sample_constraints,
                         include_section_separator=False)
def _write_vector_section(fh, header_id, vector):
    """Write a 1-D section to *fh*: the header with the length (0 when
    *vector* is None), the values, and the trailing blank separator
    line."""
    length = 0 if vector is None else vector.shape[0]
    fh.write("%s\t%d\n" % (header_id, length))
    if vector is not None:
        fh.write(_format_vector(vector.values))
    fh.write("\n")
def _write_array_section(fh, header_id, data, has_ids=True,
                         include_section_separator=True):
    """Write a 2-D section to *fh*: the header with the shape (0 0 when
    *data* is None) and one tab-separated line per row, prefixed with
    the row id when *has_ids* is true."""
    rows, cols = (0, 0) if data is None else data.shape
    fh.write("%s\t%d\t%d\n" % (header_id, rows, cols))
    if data is not None:
        if has_ids:
            for id_, vals in zip(data.index, data.values):
                fh.write(_format_vector(vals, id_))
        else:
            for vals in data.values:
                fh.write(_format_vector(vals))
    if include_section_separator:
        fh.write("\n")
def _format_vector(vector, id_=None):
formatted_vector = '\t'.join(np.asarray(vector, dtype=np.str))
if id_ is None:
return "%s\n" % formatted_vector
else:
return "%s\t%s\n" % (id_, formatted_vector)
| anderspitman/scikit-bio | skbio/io/format/ordination.py | Python | bsd-3-clause | 14,413 | [
"scikit-bio"
] | 449648c2bb4cc83f4d11afedfdbf8da95423d14363de06a98bf3906440adebe1 |
r"""OS routines for NT or Posix depending on what system we're on.
This exports:
- all functions from posix or nt, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix' or 'nt'
- os.curdir is a string representing the current directory (always '.')
- os.pardir is a string representing the parent directory (always '..')
- os.sep is the (or a most common) pathname separator ('/' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import abc
import sys, errno
import stat as st
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
"SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
"popen", "extsep"]
def _exists(name):
    """Return True if *name* is defined in this module's namespace."""
    return name in globals()
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
# Any new dependencies of the os module and/or changes in path separator
# requires updating importlib as well.
if 'posix' in _names:
    # POSIX builds: pull everything from the posix builtin module and
    # use posixpath for path manipulation.
    name = 'posix'
    linesep = '\n'
    from posix import *
    try:
        from posix import _exit
        __all__.append('_exit')
    except ImportError:
        pass
    import posixpath as path
    try:
        from posix import _have_functions
    except ImportError:
        pass
    import posix
    __all__.extend(_get_exports_list(posix))
    del posix
elif 'nt' in _names:
    # Windows builds: same scheme with the nt builtin module and ntpath.
    name = 'nt'
    linesep = '\r\n'
    from nt import *
    try:
        from nt import _exit
        __all__.append('_exit')
    except ImportError:
        pass
    import ntpath as path
    import nt
    __all__.extend(_get_exports_list(nt))
    del nt
    try:
        from nt import _have_functions
    except ImportError:
        pass
else:
    raise ImportError('no os specific module found')
# make "import os.path" resolve to the platform-specific module chosen above
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
    devnull)
del _names
if _exists("_have_functions"):
    # The platform module exported _have_functions (a collection of
    # HAVE_* capability markers); turn those into the supports_* sets
    # this module advertises for dir_fd=/fd/follow_symlinks= support.
    _globals = globals()
    def _add(str, fn):
        # Add function *fn* to the set under construction when the
        # platform advertises marker *str* and the function exists.
        if (fn in _globals) and (str in _have_functions):
            _set.add(_globals[fn])
    _set = set()
    _add("HAVE_FACCESSAT", "access")
    _add("HAVE_FCHMODAT", "chmod")
    _add("HAVE_FCHOWNAT", "chown")
    _add("HAVE_FSTATAT", "stat")
    _add("HAVE_FUTIMESAT", "utime")
    _add("HAVE_LINKAT", "link")
    _add("HAVE_MKDIRAT", "mkdir")
    _add("HAVE_MKFIFOAT", "mkfifo")
    _add("HAVE_MKNODAT", "mknod")
    _add("HAVE_OPENAT", "open")
    _add("HAVE_READLINKAT", "readlink")
    _add("HAVE_RENAMEAT", "rename")
    _add("HAVE_SYMLINKAT", "symlink")
    _add("HAVE_UNLINKAT", "unlink")
    _add("HAVE_UNLINKAT", "rmdir")
    _add("HAVE_UTIMENSAT", "utime")
    # functions accepting a dir_fd= argument
    supports_dir_fd = _set
    _set = set()
    _add("HAVE_FACCESSAT", "access")
    # functions accepting an effective_ids= argument
    supports_effective_ids = _set
    _set = set()
    _add("HAVE_FCHDIR", "chdir")
    _add("HAVE_FCHMOD", "chmod")
    _add("HAVE_FCHOWN", "chown")
    _add("HAVE_FDOPENDIR", "listdir")
    _add("HAVE_FEXECVE", "execve")
    _set.add(stat) # fstat always works
    _add("HAVE_FTRUNCATE", "truncate")
    _add("HAVE_FUTIMENS", "utime")
    _add("HAVE_FUTIMES", "utime")
    _add("HAVE_FPATHCONF", "pathconf")
    if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
        _add("HAVE_FSTATVFS", "statvfs")
    # functions accepting a file descriptor instead of a path
    supports_fd = _set
    _set = set()
    _add("HAVE_FACCESSAT", "access")
    # Some platforms don't support lchmod(). Often the function exists
    # anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP.
    # (No, I don't know why that's a good design.) ./configure will detect
    # this and reject it--so HAVE_LCHMOD still won't be defined on such
    # platforms. This is Very Helpful.
    #
    # However, sometimes platforms without a working lchmod() *do* have
    # fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15,
    # OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes
    # it behave like lchmod(). So in theory it would be a suitable
    # replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s
    # flag doesn't work *either*. Sadly ./configure isn't sophisticated
    # enough to detect this condition--it only determines whether or not
    # fchmodat() minimally works.
    #
    # Therefore we simply ignore fchmodat() when deciding whether or not
    # os.chmod supports follow_symlinks. Just checking lchmod() is
    # sufficient. After all--if you have a working fchmodat(), your
    # lchmod() almost certainly works too.
    #
    # _add("HAVE_FCHMODAT", "chmod")
    _add("HAVE_FCHOWNAT", "chown")
    _add("HAVE_FSTATAT", "stat")
    _add("HAVE_LCHFLAGS", "chflags")
    _add("HAVE_LCHMOD", "chmod")
    if _exists("lchown"): # mac os x10.3
        _add("HAVE_LCHOWN", "chown")
    _add("HAVE_LINKAT", "link")
    _add("HAVE_LUTIMES", "utime")
    _add("HAVE_LSTAT", "stat")
    _add("HAVE_FSTATAT", "stat")
    _add("HAVE_UTIMENSAT", "utime")
    _add("MS_WINDOWS", "stat")
    # functions accepting a follow_symlinks= argument
    supports_follow_symlinks = _set
    # drop the scaffolding used to build the sets above
    del _set
    del _have_functions
    del _globals
    del _add
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0  # seek relative to the beginning of the file
SEEK_CUR = 1  # seek relative to the current position
SEEK_END = 2  # seek relative to the end of the file
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
    """makedirs(name [, mode=0o777][, exist_ok=False])

    Super-mkdir; create a leaf directory and all intermediate ones. Works like
    mkdir, except that any intermediate path segment (not just the rightmost)
    will be created if it does not exist. If the target directory already
    exists, raise an OSError if exist_ok is False. Otherwise no exception is
    raised. This is recursive.
    """
    head, tail = path.split(name)
    if not tail:
        # a trailing separator: split once more to get the real leaf
        head, tail = path.split(head)
    if head and tail and not path.exists(head):
        try:
            makedirs(head, mode, exist_ok)
        except FileExistsError:
            # Defeats race condition when another thread created the path
            pass
        cdir = bytes(curdir, 'ASCII') if isinstance(tail, bytes) else curdir
        if tail == cdir:        # xxx/newdir/. exists if xxx/newdir exists
            return
    try:
        mkdir(name, mode)
    except OSError:
        # Cannot rely on checking for EEXIST, since the operating system
        # could give priority to other errors like EACCES or EROFS
        if not exist_ok or not path.isdir(name):
            raise
def removedirs(name):
    """removedirs(name)

    Super-rmdir; remove a leaf directory and all empty intermediate
    ones. Works like rmdir except that, if the leaf directory is
    successfully removed, directories corresponding to rightmost path
    segments will be pruned away until either the whole path is
    consumed or an error occurs. Errors during this latter phase are
    ignored -- they generally mean that a directory was not empty.
    """
    # removing the leaf is mandatory: let any error propagate
    rmdir(name)
    head, tail = path.split(name)
    if not tail:
        # a trailing separator: split once more to get the real leaf
        head, tail = path.split(head)
    # best-effort pruning of the (now possibly empty) parents
    while head and tail:
        try:
            rmdir(head)
        except OSError:
            break
        head, tail = path.split(head)
def renames(old, new):
    """renames(old, new)

    Super-rename; create directories as necessary and delete any left
    empty. Works like rename, except creation of any intermediate
    directories needed to make the new pathname good is attempted
    first. After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned until either the
    whole path is consumed or a nonempty directory is found.

    Note: this function can fail with the new directory structure made
    if you lack permissions needed to unlink the leaf directory or
    file.
    """
    head, tail = path.split(new)
    if head and tail and not path.exists(head):
        # create the missing parents of the destination first
        makedirs(head)
    rename(old, new)
    head, tail = path.split(old)
    if head and tail:
        # best-effort cleanup of now-empty parents of the old location
        try:
            removedirs(head)
        except OSError:
            pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune the
search, or to impose a specific order of visiting. Modifying dirnames when
topdown is false is ineffective, since the directories in dirnames have
already been generated by the time dirnames itself is generated. No matter
the value of topdown, the list of subdirectories is retrieved before the
tuples for the directory and its subdirectories are generated.
By default errors from the os.scandir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an OSError instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([getsize(join(root, name)) for name in files]), end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
top = fspath(top)
dirs = []
nondirs = []
walk_dirs = []
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that scandir is global in this module due
# to earlier import-*.
scandir_it = scandir(top)
except OSError as error:
if onerror is not None:
onerror(error)
return
with scandir_it:
while True:
try:
try:
entry = next(scandir_it)
except StopIteration:
break
except OSError as error:
if onerror is not None:
onerror(error)
return
try:
is_dir = entry.is_dir()
except OSError:
# If is_dir() raises an OSError, consider that the entry is not
# a directory, same behaviour than os.path.isdir().
is_dir = False
if is_dir:
dirs.append(entry.name)
else:
nondirs.append(entry.name)
if not topdown and is_dir:
# Bottom-up: recurse into sub-directory, but exclude symlinks to
# directories if followlinks is False
if followlinks:
walk_into = True
else:
try:
is_symlink = entry.is_symlink()
except OSError:
# If is_symlink() raises an OSError, consider that the
# entry is not a symbolic link, same behaviour than
# os.path.islink().
is_symlink = False
walk_into = not is_symlink
if walk_into:
walk_dirs.append(entry.path)
# Yield before recursion if going top down
if topdown:
yield top, dirs, nondirs
# Recurse into sub-directories
islink, join = path.islink, path.join
for dirname in dirs:
new_path = join(top, dirname)
# Issue #23605: os.path.islink() is used instead of caching
# entry.is_symlink() result during the loop on os.scandir() because
# the caller can replace the directory entry during the "yield"
# above.
if followlinks or not islink(new_path):
yield from walk(new_path, topdown, onerror, followlinks)
else:
# Recurse into sub-directories
for new_path in walk_dirs:
yield from walk(new_path, topdown, onerror, followlinks)
# Yield after recursion if going bottom up
yield top, dirs, nondirs
__all__.append("walk")
# fwalk() requires dir_fd support in open()/stat() and fd support in
# listdir()/stat(); only define it where the platform provides both.
if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd:
    def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
        """Directory tree generator.
        This behaves exactly like walk(), except that it yields a 4-tuple
            dirpath, dirnames, filenames, dirfd
        `dirpath`, `dirnames` and `filenames` are identical to walk() output,
        and `dirfd` is a file descriptor referring to the directory `dirpath`.
        The advantage of fwalk() over walk() is that it's safe against symlink
        races (when follow_symlinks is False).
        If dir_fd is not None, it should be a file descriptor open to a directory,
        and top should be relative; top will then be relative to that directory.
        (dir_fd is always supported for fwalk.)
        Caution:
        Since fwalk() yields file descriptors, those are only valid until the
        next iteration step, so you should dup() them if you want to keep them
        for a longer period.
        Example:
        import os
        for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
            print(root, "consumes", end="")
            print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
                  end="")
            print("bytes in", len(files), "non-directory files")
            if 'CVS' in dirs:
                dirs.remove('CVS')  # don't visit CVS directories
        """
        if not isinstance(top, int) or not hasattr(top, '__index__'):
            top = fspath(top)
        # Note: To guard against symlink races, we use the standard
        # lstat()/open()/fstat() trick.
        orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
        topfd = open(top, O_RDONLY, dir_fd=dir_fd)
        try:
            # Descend only if the fd we opened still refers to the directory
            # that lstat() saw above (defeats a symlink swapped in between).
            if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
                                    path.samestat(orig_st, stat(topfd)))):
                yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks)
        finally:
            close(topfd)
    def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks):
        # Recursive fd-based traversal backing fwalk().
        # Note: This uses O(depth of the directory tree) file descriptors: if
        # necessary, it can be adapted to only require O(1) FDs, see issue
        # #13734.
        names = listdir(topfd)
        dirs, nondirs = [], []
        for name in names:
            try:
                # Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with
                # walk() which reports symlinks to directories as directories.
                # We do however check for symlinks before recursing into
                # a subdirectory.
                if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode):
                    dirs.append(name)
                else:
                    nondirs.append(name)
            except OSError:
                try:
                    # Add dangling symlinks, ignore disappeared files
                    if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False)
                                .st_mode):
                        nondirs.append(name)
                except OSError:
                    continue
        if topdown:
            yield toppath, dirs, nondirs, topfd
        for name in dirs:
            try:
                orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks)
                dirfd = open(name, O_RDONLY, dir_fd=topfd)
            except OSError as err:
                if onerror is not None:
                    onerror(err)
                continue
            try:
                # Same symlink-race guard as in fwalk() itself.
                if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
                    dirpath = path.join(toppath, name)
                    yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks)
            finally:
                close(dirfd)
        if not topdown:
            yield toppath, dirs, nondirs, topfd
    __all__.append("fwalk")
# Make sure os.environ exists, at least
try:
    environ
except NameError:
    # The platform (posix/nt) module supplied no environment; fall back
    # to an empty mapping so the wrappers below still work.
    environ = {}
def execl(file, *args):
    """execl(file, *args)
    Replace the current process by executing *file*; the positional
    arguments after *file* become the new program's argument list. """
    execv(file, args)
def execle(file, *args):
    """execle(file, *args, env)
    Replace the current process by executing *file* with argument list
    args[:-1]; the final positional argument is the environment mapping. """
    execve(file, args[:-1], args[-1])
def execlp(file, *args):
    """execlp(file, *args)
    Search $PATH for *file* and execute it with the given argument list,
    replacing the current process. """
    execvp(file, args)
def execlpe(file, *args):
    """execlpe(file, *args, env)
    Search $PATH for *file* and execute it with argument list args[:-1];
    the final positional argument is the environment mapping. Replaces
    the current process. """
    execvpe(file, args[:-1], args[-1])
def execvp(file, args):
    """execvp(file, args)
    Search $PATH for *file* and execute it with argument list *args*
    (a tuple or list of strings), replacing the current process. """
    _execvpe(file, args)
def execvpe(file, args, env):
    """execvpe(file, args, env)
    Search $PATH for *file* and execute it with argument list *args*
    (a tuple or list of strings) and environment *env*, replacing the
    current process. """
    _execvpe(file, args, env)
# Publish the convenience exec* wrappers defined above.
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
    """Internal helper for execvp()/execvpe().
    Search *file* along the PATH taken from *env* (os.environ when *env*
    is None) and exec it with *args*.  On success this never returns; on
    failure it re-raises the most relevant OSError seen while probing the
    PATH entries.
    """
    if env is not None:
        exec_func = execve
        argrest = (args, env)
    else:
        exec_func = execv
        argrest = (args,)
        env = environ
    head, tail = path.split(file)
    if head:
        # An explicit directory component disables the PATH search.
        exec_func(file, *argrest)
        return
    last_exc = saved_exc = None
    saved_tb = last_tb = None
    path_list = get_exec_path(env)
    if name != 'nt':
        file = fsencode(file)
        path_list = map(fsencode, path_list)
    for dir in path_list:
        fullname = path.join(dir, file)
        try:
            exec_func(fullname, *argrest)
        except OSError as e:
            last_exc = e
            last_tb = sys.exc_info()[2]
            # Remember the first "interesting" error (anything other than
            # plain not-found), which is the most useful one to report.
            if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
                and saved_exc is None):
                saved_exc = e
                saved_tb = last_tb
    if saved_exc:
        raise saved_exc.with_traceback(saved_tb)
    if last_exc is not None:
        raise last_exc.with_traceback(last_tb)
    # PATH was empty, so nothing was even attempted.  The previous code
    # reached an unbound 'tb' here (NameError); raise a meaningful error.
    raise FileNotFoundError(errno.ENOENT, "No such file or directory", file)
def get_exec_path(env=None):
    """Returns the sequence of directories that will be searched for the
    named executable (similar to a shell) when launching a process.
    *env* must be an environment variable dict or None.  If *env* is None,
    os.environ will be used.
    """
    # Use a local import instead of a global import to limit the number of
    # modules loaded at startup: the os module is always loaded at startup by
    # Python. It may also avoid a bootstrap issue.
    import warnings
    if env is None:
        env = environ
    # {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
    # BytesWarning when using python -b or python -bb: ignore the warning
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", BytesWarning)
        try:
            path_list = env.get('PATH')
        except TypeError:
            # env.get() itself can reject the key type under -bb.
            path_list = None
        if supports_bytes_environ:
            try:
                path_listb = env[b'PATH']
            except (KeyError, TypeError):
                pass
            else:
                if path_list is not None:
                    # Refuse ambiguous environments defining both spellings.
                    raise ValueError(
                        "env cannot contain 'PATH' and b'PATH' keys")
                path_list = path_listb
        if path_list is not None and isinstance(path_list, bytes):
            path_list = fsdecode(path_list)
    if path_list is None:
        # Fall back to the compiled-in default search path.
        path_list = defpath
    return path_list.split(pathsep)
# Change environ to automatically call putenv(), unsetenv if they exist.
from _collections_abc import MutableMapping
class _Environ(MutableMapping):
    """Mutable mapping view of the process environment.
    Keys and values pass through the supplied encode/decode callables, and
    every mutation is mirrored into the real environment via the supplied
    putenv/unsetenv hooks before the local mapping is updated."""
    def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
        self.encodekey = encodekey      # user key -> stored (encoded) key
        self.decodekey = decodekey      # stored key -> user key
        self.encodevalue = encodevalue  # user value -> stored value
        self.decodevalue = decodevalue  # stored value -> user value
        self.putenv = putenv            # hook: propagate assignment to the OS
        self.unsetenv = unsetenv        # hook: propagate deletion to the OS
        self._data = data               # backing mapping of encoded pairs
    def __getitem__(self, key):
        try:
            value = self._data[self.encodekey(key)]
        except KeyError:
            # raise KeyError with the original key value
            raise KeyError(key) from None
        return self.decodevalue(value)
    def __setitem__(self, key, value):
        key = self.encodekey(key)
        value = self.encodevalue(value)
        # Update the real environment first, then the local mirror.
        self.putenv(key, value)
        self._data[key] = value
    def __delitem__(self, key):
        encodedkey = self.encodekey(key)
        # Unset in the real environment even if the mirror lacks the key.
        self.unsetenv(encodedkey)
        try:
            del self._data[encodedkey]
        except KeyError:
            # raise KeyError with the original key value
            raise KeyError(key) from None
    def __iter__(self):
        # list() from dict object is an atomic operation
        keys = list(self._data)
        for key in keys:
            yield self.decodekey(key)
    def __len__(self):
        return len(self._data)
    def __repr__(self):
        return 'environ({{{}}})'.format(', '.join(
            ('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
            for key, value in self._data.items())))
    def copy(self):
        # Snapshot as a plain dict of decoded keys/values.
        return dict(self)
    def setdefault(self, key, value):
        if key not in self:
            self[key] = value
        return self[key]
# Bind the OS-level hooks used by _Environ, with graceful fallbacks on
# platforms whose posix/nt module lacks putenv()/unsetenv().
try:
    _putenv = putenv
except NameError:
    # No putenv(): environment changes stay local to the environ mapping.
    _putenv = lambda key, value: None
else:
    if "putenv" not in __all__:
        __all__.append("putenv")
try:
    _unsetenv = unsetenv
except NameError:
    # Emulate unsetenv() by overwriting the variable with an empty value.
    _unsetenv = lambda key: _putenv(key, "")
else:
    if "unsetenv" not in __all__:
        __all__.append("unsetenv")
def _createenviron():
    # Build the os.environ mapping with platform-appropriate codecs:
    # str-only upper-cased keys on Windows, filesystem-encoded bytes
    # elsewhere.
    if name == 'nt':
        # Where Env Var Names Must Be UPPERCASE
        def check_str(value):
            if not isinstance(value, str):
                raise TypeError("str expected, not %s" % type(value).__name__)
            return value
        encode = check_str
        decode = str
        def encodekey(key):
            return encode(key).upper()
        # Re-key a copy of the native environment with upper-cased names.
        data = {}
        for key, value in environ.items():
            data[encodekey(key)] = value
    else:
        # Where Env Var Names Can Be Mixed Case
        encoding = sys.getfilesystemencoding()
        def encode(value):
            if not isinstance(value, str):
                raise TypeError("str expected, not %s" % type(value).__name__)
            return value.encode(encoding, 'surrogateescape')
        def decode(value):
            return value.decode(encoding, 'surrogateescape')
        encodekey = encode
        data = environ
    return _Environ(data,
        encodekey, decode,
        encode, decode,
        _putenv, _unsetenv)
# unicode environ
environ = _createenviron()
del _createenviron
def getenv(key, default=None):
    """Return the value of the environment variable *key* as a str, or
    *default* (None unless given) when the variable is not set.
    key, default and the result are str."""
    value = environ.get(key)
    return default if value is None else value
# The native Windows environment is str-only, so the bytes API
# (environb/getenvb) is unsupported there.
supports_bytes_environ = (name != 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
if supports_bytes_environ:
    def _check_bytes(value):
        # Key/value codec for environb: only bytes pass through unchanged.
        if not isinstance(value, bytes):
            raise TypeError("bytes expected, not %s" % type(value).__name__)
        return value
    # bytes environ
    # Shares the same backing dict as environ, so the two views stay in sync.
    environb = _Environ(environ._data,
        _check_bytes, bytes,
        _check_bytes, bytes,
        _putenv, _unsetenv)
    del _check_bytes
    def getenvb(key, default=None):
        """Get an environment variable, return None if it doesn't exist.
        The optional second argument can specify an alternate default.
        key, default and the result are bytes."""
        return environb.get(key, default)
    __all__.extend(("environb", "getenvb"))
def _fscodec():
    # Build fsencode()/fsdecode() as closures over the filesystem codec
    # settings captured once at import time.
    encoding = sys.getfilesystemencoding()
    errors = sys.getfilesystemencodeerrors()
    def fsencode(filename):
        """Encode filename (an os.PathLike, bytes, or str) to the filesystem
        encoding with 'surrogateescape' error handler, return bytes unchanged.
        On Windows, use 'strict' error handler if the file system encoding is
        'mbcs' (which is the default encoding).
        """
        filename = fspath(filename)  # Does type-checking of `filename`.
        if isinstance(filename, str):
            return filename.encode(encoding, errors)
        else:
            return filename
    def fsdecode(filename):
        """Decode filename (an os.PathLike, bytes, or str) from the filesystem
        encoding with 'surrogateescape' error handler, return str unchanged. On
        Windows, use 'strict' error handler if the file system encoding is
        'mbcs' (which is the default encoding).
        """
        filename = fspath(filename)  # Does type-checking of `filename`.
        if isinstance(filename, bytes):
            return filename.decode(encoding, errors)
        else:
            return filename
    return fsencode, fsdecode
fsencode, fsdecode = _fscodec()
del _fscodec
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
    P_WAIT = 0
    P_NOWAIT = P_NOWAITO = 1
    __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])
    # XXX Should we support P_DETACH?  I suppose it could fork()**2
    # and close the std I/O streams.  Also, P_OVERLAY is the same
    # as execv*()?
    def _spawnvef(mode, file, args, env, func):
        # Internal helper; func is the exec*() function to use
        if not isinstance(args, (tuple, list)):
            raise TypeError('argv must be a tuple or a list')
        if not args or not args[0]:
            raise ValueError('argv first element cannot be empty')
        pid = fork()
        if not pid:
            # Child
            try:
                if env is None:
                    func(file, args)
                else:
                    func(file, args, env)
            except:
                # Never let an exception escape into the forked child's
                # copy of the interpreter; report failure via exit code 127.
                _exit(127)
        else:
            # Parent
            if mode == P_NOWAIT:
                return pid # Caller is responsible for waiting!
            while 1:
                wpid, sts = waitpid(pid, 0)
                if WIFSTOPPED(sts):
                    # Child only stopped (e.g. SIGSTOP): keep waiting.
                    continue
                elif WIFSIGNALED(sts):
                    # Killed by a signal: report as negative signal number.
                    return -WTERMSIG(sts)
                elif WIFEXITED(sts):
                    return WEXITSTATUS(sts)
                else:
                    raise OSError("Not stopped, signaled or exited???")
    def spawnv(mode, file, args):
        """spawnv(mode, file, args) -> integer
        Execute file with arguments from args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execv)
    def spawnve(mode, file, args, env):
        """spawnve(mode, file, args, env) -> integer
        Execute file with arguments from args in a subprocess with the
        specified environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
    def spawnvp(mode, file, args):
        """spawnvp(mode, file, args) -> integer
        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execvp)
    def spawnvpe(mode, file, args, env):
        """spawnvpe(mode, file, args, env) -> integer
        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execvpe)
    __all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"])
if _exists("spawnv"):
    # These aren't supplied by the basic Windows code
    # but can be easily implemented in Python
    def spawnl(mode, file, *args):
        """spawnl(mode, file, *args) -> integer
        Execute file with arguments from args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return spawnv(mode, file, args)
    def spawnle(mode, file, *args):
        """spawnle(mode, file, *args, env) -> integer
        Execute file with arguments from args in a subprocess with the
        supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        # The trailing positional argument is the environment mapping.
        env = args[-1]
        return spawnve(mode, file, args[:-1], env)
    __all__.extend(["spawnl", "spawnle"])
if _exists("spawnvp"):
    # At the moment, Windows doesn't implement spawnvp[e],
    # so it won't have spawnlp[e] either.
    def spawnlp(mode, file, *args):
        """spawnlp(mode, file, *args) -> integer
        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess, inheriting the current environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return spawnvp(mode, file, args)
    def spawnlpe(mode, file, *args):
        """spawnlpe(mode, file, *args, env) -> integer
        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        # The trailing positional argument is the environment mapping.
        env = args[-1]
        return spawnvpe(mode, file, args[:-1], env)
    __all__.extend(["spawnlp", "spawnlpe"])
# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
    """Open a pipe to ("w") or from ("r") the shell command *cmd*.
    Returns a file-like proxy wrapping the child's stdin or stdout; its
    close() waits for the child and reports the exit status like the
    classic os.popen()."""
    if not isinstance(cmd, str):
        raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
    if mode not in ("r", "w"):
        raise ValueError("invalid mode %r" % mode)
    if buffering == 0 or buffering is None:
        raise ValueError("popen() does not support unbuffered streams")
    import subprocess, io
    if mode == "r":
        proc = subprocess.Popen(cmd, shell=True,
                                stdout=subprocess.PIPE, bufsize=buffering)
        pipe = proc.stdout
    else:
        proc = subprocess.Popen(cmd, shell=True,
                                stdin=subprocess.PIPE, bufsize=buffering)
        pipe = proc.stdin
    return _wrap_close(io.TextIOWrapper(pipe), proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if name == 'nt':
return returncode
else:
return returncode << 8 # Shift left to match old behavior
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
    """Return an open file object connected to the file descriptor *fd*.
    Thin wrapper around io.open(); extra arguments pass straight through."""
    if isinstance(fd, int):
        import io
        return io.open(fd, *args, **kwargs)
    raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
# For testing purposes, make sure the function is available when the C
# implementation exists.
def _fspath(path):
"""Return the path representation of a path-like object.
If str or bytes is passed in, it is returned unchanged. Otherwise the
os.PathLike interface is used to get the path representation. If the
path representation is not str or bytes, TypeError is raised. If the
provided path is not str, bytes, or os.PathLike, TypeError is raised.
"""
if isinstance(path, (str, bytes)):
return path
# Work from the object's type to match method resolution of other magic
# methods.
path_type = type(path)
try:
path_repr = path_type.__fspath__(path)
except AttributeError:
if hasattr(path_type, '__fspath__'):
raise
else:
raise TypeError("expected str, bytes or os.PathLike object, "
"not " + path_type.__name__)
if isinstance(path_repr, (str, bytes)):
return path_repr
else:
raise TypeError("expected {}.__fspath__() to return str or bytes, "
"not {}".format(path_type.__name__,
type(path_repr).__name__))
# If there is no C implementation, make the pure Python version the
# implementation as transparently as possible.
if not _exists('fspath'):
    fspath = _fspath
    # Present the fallback under the public name in tracebacks/introspection.
    fspath.__name__ = "fspath"
class PathLike(abc.ABC):
    """Abstract base class for implementing the file system path protocol."""
    @abc.abstractmethod
    def __fspath__(self):
        """Return the file system path representation of the object."""
        raise NotImplementedError
    @classmethod
    def __subclasshook__(cls, subclass):
        # Structural check: anything providing __fspath__ counts as PathLike,
        # even without explicit registration or inheritance.
        return hasattr(subclass, '__fspath__')
| prefetchnta/questlab | bin/x64bin/python/36/Lib/os.py | Python | lgpl-2.1 | 38,595 | [
"VisIt"
] | b5d3c4c0e5170a83a8800eb795a672287174fcf98c3f3a5e2aded97107b60ea5 |
import numpy as np
import pickle
from time import time, ctime
from datetime import timedelta
from ase.structure import bulk
from ase.units import Hartree
from gpaw import GPAW, FermiDirac
from gpaw.response.gw import GW
from gpaw.xc.hybridk import HybridXC
from gpaw.xc.tools import vxc
from gpaw.mpi import serial_comm, world, rank
# Regression test: GW quasi-particle energies for bulk Si must agree
# (to 0.01 eV) between the two frequency-integration methods
# (hilbert_trans True/False).
# NOTE(review): written for Python 2 -- see the print statement on the
# last line -- and legacy ase.structure.bulk.
starttime = time()
# --- LDA ground state of bulk silicon on a 2x2x2 k-point grid ---
a = 5.431
atoms = bulk('Si', 'diamond', a=a)
kpts = (2,2,2)
calc = GPAW(
    h=0.24,
    kpts=kpts,
    xc='LDA',
    txt='Si_gs.txt',
    nbands=10,
    convergence={'bands':8},
    occupations=FermiDirac(0.001)
)
atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write('Si_gs.gpw','all')
# GW setup: correct bands 3 and 4 (around the gap) at all IBZ k-points.
nbands=8
bands=np.array([3,4])
ecut=25./Hartree
gwkpt_k = calc.wfs.kd.ibz2bz_k
gwnkpt = calc.wfs.kd.nibzkpts
gwnband = len(bands)
file='Si_gs.gpw'
# Reload the ground state serially for the exact-exchange evaluation.
calc = GPAW(
    file,
    communicator=serial_comm,
    parallel={'domain':1},
    txt=None
)
v_xc = vxc(calc)
# Exact exchange with divergence-correction parameter alpha.
alpha = 5.0
exx = HybridXC('EXX', alpha=alpha, ecut=ecut, bands=bands)
calc.get_xc_difference(exx)
# Collect LDA eigenvalues, xc potential and EXX contributions per
# (k-point, band); everything stored in Hartree.
e_kn = np.zeros((gwnkpt, gwnband), dtype=float)
v_kn = np.zeros((gwnkpt, gwnband), dtype=float)
e_xx = np.zeros((gwnkpt, gwnband), dtype=float)
i = 0
for k in range(gwnkpt):
    j = 0
    for n in bands:
        e_kn[i][j] = calc.get_eigenvalues(kpt=k)[n] / Hartree
        v_kn[i][j] = v_xc[0][k][n] / Hartree
        e_xx[i][j] = exx.exx_skn[0][k][n]
        j += 1
    i += 1
data = {
    'e_kn': e_kn, # in Hartree
    'v_kn': v_kn, # in Hartree
    'e_xx': e_xx, # in Hartree
    'gwkpt_k': gwkpt_k,
    'gwbands_n': bands
}
if rank == 0:
    # NOTE(review): binary pickle protocol (-1) written to a text-mode
    # file handle; fine on POSIX/Python 2, would need 'wb' under Python 3.
    pickle.dump(data, open('EXX.pckl', 'w'), -1)
exxfile='EXX.pckl'
# GW run 1: direct frequency integration (no Hilbert transform).
gw = GW(
    file=file,
    nbands=8,
    bands=np.array([3,4]),
    w=np.array([10., 30., 0.05]),
    ecut=25.,
    eta=0.1,
    hilbert_trans=False,
    exxfile=exxfile
)
gw.get_QP_spectrum()
QP_False = gw.QP_kn * Hartree
# GW run 2: identical settings via the Hilbert-transform method.
gw = GW(
    file=file,
    nbands=8,
    bands=np.array([3,4]),
    w=np.array([10., 30., 0.05]),
    ecut=25.,
    eta=0.1,
    hilbert_trans=True,
    exxfile=exxfile
)
gw.get_QP_spectrum()
QP_True = gw.QP_kn * Hartree
# The two methods must agree to better than 0.01 eV for every state.
if not (np.abs(QP_False - QP_True) < 0.01).all():
    raise AssertionError("method 1 not equal to method 2")
totaltime = round(time() - starttime)
print "GW test finished in %s " %(timedelta(seconds=totaltime))
| ajylee/gpaw-rtxs | gpaw/test/gw_test.py | Python | gpl-3.0 | 2,541 | [
"ASE",
"GPAW"
] | 7499d34495f155c9c99f749454cb251bbb86577a605b1d136587f5044d8728ea |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import numpy as np
from time import time, sleep
import argparse
# CLI: particles per MPI rank, packing fraction, optional pair bonds, and
# either a CSV output path or a live visualizer (mutually exclusive).
parser = argparse.ArgumentParser(description="Benchmark LJ simulations. "
                                 "Save the results to a CSV file.")
parser.add_argument("--particles_per_core", metavar="N", action="store",
                    type=int, default=1000, required=False,
                    help="Number of particles in the simulation box")
parser.add_argument("--volume_fraction", metavar="FRAC", action="store",
                    type=float, default=0.50, required=False,
                    help="Fraction of the simulation box volume occupied by "
                    "particles (range: [0.01-0.74], default: 0.50)")
parser.add_argument("--bonds", action="store_true",
                    help="Add bonds between particle pairs, default: false")
group = parser.add_mutually_exclusive_group()
group.add_argument("--output", metavar="FILEPATH", action="store",
                   type=str, required=False, default="benchmarks.csv",
                   help="Output file (default: benchmarks.csv)")
group.add_argument("--visualizer", action="store_true",
                   help="Starts the visualizer (for debugging purposes)")
args = parser.parse_args()
# process and check arguments
# Scale the timing window inversely with system size so every tick does a
# comparable total amount of work (about 5e6 particle-steps per core).
measurement_steps = int(np.round(5e6 / args.particles_per_core, -2))
n_iterations = 30
assert args.volume_fraction > 0, "volume_fraction must be a positive number"
assert args.volume_fraction < np.pi / (3 * np.sqrt(2)), \
    "volume_fraction exceeds the physical limit of sphere packing (~0.74)"
assert not (args.bonds and args.volume_fraction > 0.5), \
    "volume_fraction too dense (>0.50) for a diatomic liquid, risk of bonds breaking"
if not args.visualizer:
    assert(measurement_steps >= 100), \
        "{} steps per tick are too short".format(measurement_steps)
import espressomd
if args.visualizer:
    from espressomd import visualization
    from threading import Thread
required_features = ["LENNARD_JONES"]
espressomd.assert_features(required_features)
print(espressomd.features())
# System
#############################################################
system = espressomd.System(box_l=[1, 1, 1])
# Interaction parameters (Lennard-Jones)
#############################################################
lj_eps = 1.0  # LJ epsilon
lj_sig = 1.0  # particle diameter
lj_cut = lj_sig * 2**(1. / 6.)  # cutoff distance (WCA, purely repulsive)
# System parameters
#############################################################
n_proc = system.cell_system.get_state()['n_nodes']
n_part = n_proc * args.particles_per_core
# volume of N spheres with radius r: N * (4/3*pi*r^3)
# Box edge chosen so the spheres occupy the requested volume fraction.
box_l = (n_part * 4. / 3. * np.pi * (lj_sig / 2.)**3
         / args.volume_fraction)**(1. / 3.)
# System
#############################################################
system.box_l = 3 * (box_l,)
# PRNG seeds
#############################################################
# np.random.seed(1)
# Integration parameters
#############################################################
system.time_step = 0.01
system.cell_system.skin = 0.5
system.thermostat.turn_off()
#############################################################
#  Setup System                                             #
#############################################################
# Interaction setup
#############################################################
system.non_bonded_inter[0, 0].lennard_jones.set_params(
    epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto")
print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].lennard_jones.get_params())
# Particle setup
#############################################################
if not args.bonds:
    for i in range(n_part):
        system.part.add(id=i, pos=np.random.random(3) * system.box_l)
else:
    # Diatomic liquid: place particles pairwise and connect each pair
    # with a harmonic bond at the LJ cutoff distance.
    hb = espressomd.interactions.HarmonicBond(r_0=lj_cut, k=2)
    system.bonded_inter.add(hb)
    for i in range(0, n_part, 2):
        pos = np.random.random(3) * system.box_l
        system.part.add(id=i, pos=pos)
        system.part.add(id=i + 1, pos=pos + np.random.random(3) / np.sqrt(3))
        system.part[i].add_bond((hb, i + 1))
#############################################################
#  Warmup Integration                                       #
#############################################################
system.integrator.set_steepest_descent(
    f_max=0,
    gamma=0.001,
    max_displacement=0.01)
# warmup
# Relax random-placement overlaps until the energy is below 3 kT/particle.
while system.analysis.energy()["total"] > 3 * n_part:
    print("minimization: {:.1f}".format(system.analysis.energy()["total"]))
    system.integrator.run(20)
print("minimization: {:.1f}".format(system.analysis.energy()["total"]))
print()
system.integrator.set_vv()
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
# tuning and equilibration
# Tune twice with equilibration in between so the skin reflects the
# equilibrated structure.
print("Tune skin: {}".format(system.cell_system.tune_skin(
    min_skin=0.2, max_skin=1, tol=0.05, int_steps=100)))
system.integrator.run(min(5 * measurement_steps, 60000))
print("Tune skin: {}".format(system.cell_system.tune_skin(
    min_skin=0.2, max_skin=1, tol=0.05, int_steps=100)))
system.integrator.run(min(10 * measurement_steps, 60000))
print(system.non_bonded_inter[0, 0].lennard_jones)
if not args.visualizer:
    # print initial energies
    energies = system.analysis.energy()
    print(energies)
    # time integration loop
    # Time n_iterations ticks of measurement_steps MD steps each.
    print("Timing every {} steps".format(measurement_steps))
    main_tick = time()
    all_t = []
    for i in range(n_iterations):
        tick = time()
        system.integrator.run(measurement_steps)
        tock = time()
        t = (tock - tick) / measurement_steps
        print("step {}, time = {:.2e}, verlet: {:.2f}, energy: {:.2e}"
              .format(i, t, system.cell_system.get_state()["verlet_reuse"],
                      system.analysis.energy()["total"]))
        all_t.append(t)
    main_tock = time()
    # average time
    # Mean per-step wall time with a 95% confidence interval.
    all_t = np.array(all_t)
    avg = np.average(all_t)
    ci = 1.96 * np.std(all_t) / np.sqrt(len(all_t) - 1)
    print("average: {:.3e} +/- {:.3e} (95% C.I.)".format(avg, ci))
    # print final energies
    energies = system.analysis.energy()
    print(energies)
    # write report
    # Append one CSV row; emit the header first when the file is new.
    cmd = " ".join(x for x in sys.argv[1:] if not x.startswith("--output"))
    report = ('"{script}","{arguments}",{cores},{mean:.3e},'
              '{ci:.3e},{n},{dur:.1f}\n'.format(
                  script=os.path.basename(sys.argv[0]), arguments=cmd,
                  cores=n_proc, dur=main_tock - main_tick, n=measurement_steps,
                  mean=avg, ci=ci))
    if not os.path.isfile(args.output):
        report = ('"script","arguments","cores","mean","ci",'
                  '"nsteps","duration"\n' + report)
    with open(args.output, "a") as f:
        f.write(report)
else:
    # use visualizer
    visualizer = visualization.openGLLive(system)
    def main_thread():
        # Integrate on a background thread so the OpenGL main loop can own
        # the foreground thread.
        while True:
            system.integrator.run(1)
            visualizer.update()
            sleep(1 / 60.)  # limit frame rate to at most 60 FPS
    t = Thread(target=main_thread)
    t.daemon = True
    t.start()
    visualizer.start()
| KaiSzuttor/espresso | maintainer/benchmarks/lj.py | Python | gpl-3.0 | 7,805 | [
"ESPResSo"
] | d296d889f7b1a1319444ce40b7aec856654a5a59ce46776d9e12ce54cd3f0404 |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# A script to test the stencil filter.
# removes all but a sphere.
# Load the reference CT slice with explicit spacing/origin.
reader = vtk.vtkPNGReader()
reader.SetDataSpacing(0.8,0.8,1.5)
reader.SetDataOrigin(0.0,0.0,0.0)
reader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/fullhead15.png")
reader.Update()
# Implicit sphere defining the region to keep.
sphere = vtk.vtkSphere()
sphere.SetCenter(128,128,0)
sphere.SetRadius(80)
# Convert the implicit function into an image stencil laid out on the
# reader's geometry (spacing/origin/extent).
functionToStencil = vtk.vtkImplicitFunctionToImageStencil()
functionToStencil.SetInput(sphere)
functionToStencil.SetInformationInput(reader.GetOutput())
functionToStencil.Update()
# test making a copying of the stencil (for coverage)
stencilOriginal = functionToStencil.GetOutput()
stencilCopy = stencilOriginal.NewInstance()
stencilCopy.DeepCopy(functionToStencil.GetOutput())
# Dimmed copy of the image used as background outside the stencil.
shiftScale = vtk.vtkImageShiftScale()
shiftScale.SetInputConnection(reader.GetOutputPort())
shiftScale.SetScale(0.2)
shiftScale.Update()
# Composite: original pixels inside the sphere, dimmed pixels outside.
stencil = vtk.vtkImageStencil()
stencil.SetInputConnection(reader.GetOutputPort())
stencil.SetBackgroundInputData(shiftScale.GetOutput())
stencil.SetStencilData(stencilCopy)
stencilCopy.UnRegister(stencil) # not needed in python
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(stencil.GetOutputPort())
viewer.SetZSlice(0)
viewer.SetColorWindow(2000)
viewer.SetColorLevel(1000)
viewer.Render()
# --- end of script --
| hlzz/dotfiles | graphics/VTK-7.0.0/Imaging/Core/Testing/Python/TestStencilWithFunction.py | Python | bsd-3-clause | 1,439 | [
"VTK"
] | 67c3879100433b268dd4b0e0b1e5e5cf040e1736f1af9b4145eb977cb897daf2 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from numpy.testing import assert_almost_equal
import MDAnalysis as mda
from MDAnalysisTests.topology.base import ParserBase
from MDAnalysisTests.datafiles import HoomdXMLdata
class TestHoomdXMLParser(ParserBase):
    """Checks the HOOMD XML topology parser against the reference file:
    per-atom attribute sizes, connectivity counts, a few explicit
    bonds/angles/dihedrals, and the stored masses/charges.
    """
    parser = mda.topology.HoomdXMLParser.HoomdXMLParser
    ref_filename = HoomdXMLdata
    expected_attrs = [
        'types', 'masses', 'charges', 'radii', 'bonds', 'angles', 'dihedrals', 'impropers'
    ]
    expected_n_atoms = 769
    expected_n_residues = 1
    expected_n_segments = 1

    def test_attr_size(self, top):
        # Every per-atom attribute must have exactly one entry per atom.
        n_atoms = top.n_atoms
        assert len(top.types) == n_atoms
        assert len(top.charges) == n_atoms
        assert len(top.masses) == n_atoms

    def test_bonds(self, top):
        bond_values = top.bonds.values
        assert len(bond_values) == 704
        assert isinstance(bond_values[0], tuple)

    def test_angles(self, top):
        angle_values = top.angles.values
        assert len(angle_values) == 640
        assert isinstance(angle_values[0], tuple)

    def test_dihedrals(self, top):
        dihedral_values = top.dihedrals.values
        assert len(dihedral_values) == 576
        assert isinstance(dihedral_values[0], tuple)

    def test_impropers(self, top):
        assert len(top.impropers.values) == 0

    def test_bonds_identity(self, top):
        vals = top.bonds.values
        # Bond tuples carry no guaranteed direction, so accept either order.
        for bond in ((0, 1), (1, 2), (2, 3), (3, 4)):
            assert bond in vals or bond[::-1] in vals
        assert (0, 450) not in vals

    def test_angles_identity(self, top):
        vals = top.angles.values
        for angle in ((0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5)):
            assert angle in vals or angle[::-1] in vals
        assert (0, 350, 450) not in vals

    def test_dihedrals_identity(self, top):
        vals = top.dihedrals.values
        for dihedral in ((0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5), (3, 4, 5, 6)):
            assert dihedral in vals or dihedral[::-1] in vals
        assert (0, 250, 350, 450) not in vals

    def test_read_masses(self, top):
        assert_almost_equal(top.masses.values, 1.0)

    def test_read_charges(self, top):
        # The example topology stores 0 for every charge, which coincides
        # with the default, so this does not fully exercise reading charges
        # from the file (#2888).
        assert_almost_equal(top.charges.values, 0.0)
| MDAnalysis/mdanalysis | testsuite/MDAnalysisTests/topology/test_hoomdxml.py | Python | gpl-2.0 | 3,324 | [
"MDAnalysis"
] | ac3ab9a4cd29595dd6339ef98e111f1c04ccf6c57acf42e8c9fbf92405c06525 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import mvn_linear_operator as mvn_linop
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
__all__ = [
"MultivariateNormalDiag",
"MultivariateNormalDiagWithSoftplusScale",
]
class MultivariateNormalDiag(
    mvn_linop.MultivariateNormalLinearOperator):
  """Multivariate normal distribution on `R^k` with a diagonal scale matrix.

  The distribution is parameterized by a (batch of) length-`k` `loc` vector
  (aka "mu") and a (batch of) `k x k` `scale` matrix, where
  `covariance = scale @ scale.T` (`@` is matrix multiplication).

  #### Mathematical Details

  The probability density function (pdf) is

  ```none
  pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,
  y = inv(scale) @ (x - loc),
  Z = (2 pi)**(0.5 k) |det(scale)|,
  ```

  where `loc` is a vector in `R^k`, `scale` is a linear operator in
  `R^{k x k}` and `Z` is the normalization constant.

  A (non-batch) `scale` matrix is

  ```none
  scale = diag(scale_diag + scale_identity_multiplier * ones(k))
  ```

  with `scale_diag.shape = [k]` and `scale_identity_multiplier.shape = []`;
  additional leading dimensions (if any) index batches.  When both
  `scale_diag` and `scale_identity_multiplier` are `None`, `scale` is the
  identity matrix.

  The MultivariateNormal distribution belongs to the [location-scale
  family](https://en.wikipedia.org/wiki/Location-scale_family): it can be
  constructed as `Y = scale @ X + loc` with
  `X ~ MultivariateNormal(loc=0, scale=1)`.

  #### Examples

  ```python
  tfd = tf.contrib.distributions

  # A single 2-variate Gaussian.
  mvn = tfd.MultivariateNormalDiag(
      loc=[1., -1],
      scale_diag=[1, 2.])
  mvn.mean().eval()            # ==> [1., -1]
  mvn.stddev().eval()          # ==> [1., 2]
  mvn.prob([-1., 0]).eval()    # scalar

  # A 3-batch, 2-variate scaled-identity Gaussian.
  mvn = tfd.MultivariateNormalDiag(
      loc=[1., -1],
      scale_identity_multiplier=[1, 2., 3])
  mvn.mean().eval()            # shape: [3, 2]
  mvn.stddev().eval()          # shape: [3, 2]
  mvn.prob([-1., 0]).eval()    # shape: [3]

  # A 2-batch of 3-variate Gaussians.
  mvn = tfd.MultivariateNormalDiag(
      loc=[[1., 2, 3],
           [11, 22, 33]]            # shape: [2, 3]
      scale_diag=[[1., 2, 3],
                  [0.5, 1, 1.5]])   # shape: [2, 3]
  x = [[-1., 0, 1],
       [-11, 0, 11.]]               # shape: [2, 3]
  mvn.prob(x).eval()                # shape: [2]
  ```

  """

  def __init__(self,
               loc=None,
               scale_diag=None,
               scale_identity_multiplier=None,
               validate_args=False,
               allow_nan_stats=True,
               name="MultivariateNormalDiag"):
    """Construct a Multivariate Normal distribution on `R^k`.

    The `batch_shape` is the broadcast shape of the `loc` and `scale`
    arguments; the `event_shape` is the last dimension of the matrix
    implied by `scale` (the last dimension of `loc`, if provided, must
    broadcast with it).  Recall `covariance = scale @ scale.T` and

    ```none
    scale = diag(scale_diag + scale_identity_multiplier * ones(k))
    ```

    Args:
      loc: Floating-point `Tensor` of shape `[B1, ..., Bb, k]` (`b >= 0`,
        `k` the event size), or `None` (implicitly zero).
      scale_diag: Non-zero, floating-point `Tensor` of shape
        `[B1, ..., Bb, k]`; diagonal matrix added to `scale`, or `None`.
      scale_identity_multiplier: Non-zero, floating-point `Tensor` of shape
        `[B1, ..., Bb]`; scaled identity matrix added to `scale`, or `None`.
        When both this and `scale_diag` are `None`, `scale` is the identity.
      validate_args: Python `bool`, default `False`. When `True`,
        distribution parameters are checked for validity despite possibly
        degrading runtime performance.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        undefined statistics (mean, mode, variance, ...) evaluate to `NaN`;
        when `False`, they raise instead.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      ValueError: if at most `scale_identity_multiplier` is specified.
    """
    # Capture the constructor arguments before any new local is created.
    parameters = dict(locals())
    with ops.name_scope(name) as name:
      with ops.name_scope("init", values=[
          loc, scale_diag, scale_identity_multiplier]):
        # validate_args is deliberately False while building diag_scale:
        # the returned LinearOperatorDiag exposes assert_non_singular,
        # which is invoked later by the Bijector.
        scale = distribution_util.make_diag_scale(
            loc=loc,
            scale_diag=scale_diag,
            scale_identity_multiplier=scale_identity_multiplier,
            validate_args=False,
            assert_positive=False)
      super(MultivariateNormalDiag, self).__init__(
          loc=loc,
          scale=scale,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    self._parameters = parameters
class MultivariateNormalDiagWithSoftplusScale(MultivariateNormalDiag):
  """MultivariateNormalDiag where `scale_diag` is passed through `softplus`.

  Equivalent to `MultivariateNormalDiag(loc, softplus(scale_diag))`.
  """

  def __init__(self,
               loc,
               scale_diag,
               validate_args=False,
               allow_nan_stats=True,
               name="MultivariateNormalDiagWithSoftplusScale"):
    """Construct the distribution; see `MultivariateNormalDiag.__init__`."""
    # Capture the constructor arguments before any new local is created.
    parameters = dict(locals())
    with ops.name_scope(name, values=[scale_diag]) as name:
      super(MultivariateNormalDiagWithSoftplusScale, self).__init__(
          loc=loc,
          scale_diag=nn.softplus(scale_diag),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    self._parameters = parameters
| yanchen036/tensorflow | tensorflow/contrib/distributions/python/ops/mvn_diag.py | Python | apache-2.0 | 8,105 | [
"Gaussian"
] | 8b08c6c9b2c6b2ab054d27868bb0ecb30bcf239b4079a60489311b07fe479b47 |
#!/usr/bin/env python
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import colorConverter
from ase.units import Bohr
import os.path
from ase_utils.kpoints import cubic_kpath
from collections import namedtuple
def plot_phon_from_nc(fname, title='BaT', output_filename='phonon.png'):
    """
    Read a phonon band structure from an abinit PHBST netCDF file and plot
    it as fat bands, color-coded by the A (red), B (green) and O (blue)
    sublattice weight of each mode.  Symmetry labels of the modes at the
    special q-points are annotated on the plot (see label_all).

    Args:
        fname: path to the *_PHBST.nc file.
        title: plot title.
        output_filename: image file written with plt.savefig.

    Returns:
        (qpoints, phfreqs, phdisps, masses) as read from the file; phfreqs
        is scaled to cm^-1 and its Gamma rows are patched by fix_gamma.
    """
    ds = Dataset(fname, mode='r')
    #ds.variables[u'space_group'][:]
    #print ds.variables[u'primitive_vectors'][:]
    #print ds.variables.keys()
    qpoints = ds.variables[u'qpoints'][:]
    print qpoints
    # 8065.6 is a unit-conversion factor to cm^-1 (presumably eV -> cm^-1;
    # TODO confirm the frequency units stored in the PHBST file).
    phfreqs = ds.variables[u'phfreqs'][:] * 8065.6
    phdisps = ds.variables[u'phdispl_cart'][:]
    masses = ds.variables[u'atomic_mass_units'][:]
    # Repeat the last mass twice (presumably the file stores one mass per
    # species of an ABO3 compound while 5 per-atom masses are needed --
    # TODO confirm against the file contents).
    masses = list(masses) + [masses[-1]] * 2
    # Symmetry labels of the modes at the special q-points.
    IR_modes = label_all(qpoints, phfreqs, phdisps, masses)
    #return
    # Debug output: displacements of the first mode at the first q-point.
    print phdisps[0, 0, :, :] / Bohr
    print phdisps[0, 0, :, 0] / Bohr
    print get_weight(phdisps[0, 0, :, :], masses)
    # Replace the frequencies exactly at Gamma by a neighboring q-point's.
    phfreqs = fix_gamma(qpoints, phfreqs)
    # Fractional weight of the A cation, B cation and O atoms in each mode.
    weights_A = np.empty_like(phfreqs)
    weights_B = np.empty_like(phfreqs)
    weights_C = np.empty_like(phfreqs)
    nk, nm = phfreqs.shape
    print phdisps.shape
    print nk, nm
    for i in range(nk):
        for j in range(nm):
            weights_A[i, j], weights_B[i, j], weights_C[i, j] = get_weight(
                phdisps[i, j, :, :], masses)
    #for i in range(1):
    #    plt.plot(weights_B[:, i], linewidth=0.1, color='gray')
    #plt.plot(weights_A[:, i], linewidth=0.1, color='gray')
    #plt.show()
    #return
    axis = None
    # Cubic high-symmetry path Gamma-X-M-Gamma-R-X; kpath[1] holds the x
    # coordinates and kpath[2] the positions of the special points.
    kpath = cubic_kpath()
    kslist = [kpath[1]] * 15
    xticks = [['$\Gamma$', 'X', 'M', '$\Gamma$', 'R', 'X'], kpath[2]]
    print len(kslist)
    # One overlaid fat-band plot per sublattice weight (A=red, B=green,
    # O=blue); the three calls draw into the same axis.
    axis = plot_band_weight(
        kslist,
        phfreqs.T,
        weights_A.T,
        axis=axis,
        color='red',
        style='alpha',
        xticks=xticks,
        title=title)
    axis = plot_band_weight(
        kslist,
        phfreqs.T,
        weights_B.T,
        axis=axis,
        color='green',
        style='alpha',
        xticks=xticks,
        title=title)
    axis = plot_band_weight(
        kslist,
        phfreqs.T,
        weights_C.T,
        axis=axis,
        color='blue',
        style='alpha',
        xticks=xticks,
        title=title)
    # Annotate the mode labels near their special point: tick_mode maps the
    # q-point name to an index into xticks[1].
    tick_mode = {'R': -2, 'X': -1, 'M': 2,'Gamma':0}
    for qname in IR_modes:
        for mode in IR_modes[qname]:
            print mode
            # Shift the label left of the tick when there is room,
            # otherwise nudge it slightly to the right.
            shiftx = lambda x: x-0.2 if x>0.2 else x+0.01
            axis.annotate(
                mode[1], (shiftx(xticks[1][tick_mode[qname]]) , mode[0] + 5),
                fontsize='x-small',
                color='black',wrap=True)
    plt.savefig(output_filename,dpi=300)
    plt.show()
    return qpoints, phfreqs, phdisps, masses
    #print ds.variables[u'phfreqs'][:]
    #print ds.variables[u'phdispl_cart'][:]
    #for k in ds.variables:
    #    print "--------------\n"
    #    print k
    #    print ds.variables[k][:]
def label_all(qpoints, phfreqs, phdisps, masses):
    """Assign symmetry labels to the phonon modes at the special q-points.

    Every q-point in `qpoints` that matches one of the reference points
    (X, M, R) is processed; each of its modes is labelled with `label`.

    Returns:
        dict mapping the q-point name to a list of [frequency, label] pairs.
    """
    special_qpoints = {
        #'Gamma': [0, 0.013333, 0],
        'X': [0, 0.5, 0],
        'M': [0.5, 0.5, 0],
        'R': [0.5, 0.5, 0.5]
    }
    mode_dict = {}
    for iq, qpt in enumerate(qpoints):
        # print qpt
        for qname, qref in special_qpoints.items():
            if not np.isclose(qpt, qref, rtol=1e-5, atol=1e-3).all():
                continue
            mode_dict[qname] = []
            print("====================================")
            print(qname)
            for imode, raw_disp in enumerate(phdisps[iq]):
                # Re-assemble the complex displacement from its (real,
                # imaginary) component pair.
                cdisp = raw_disp[:, 0] + 1.0j * raw_disp[:, 1]
                mode_name = label(qname, cdisp, masses)
                mode_dict[qname].append([phfreqs[iq][imode], mode_name])
    print(mode_dict)
    return mode_dict
def label(qname, phdisp, masses, notation='IR'):
    """Assign a symmetry label to one phonon eigendisplacement of a cubic
    ABO3 perovskite at a special q-point.

    The displacement is mass-weighted, normalized, and projected onto a
    table of reference mode patterns; any pattern with overlap |p| > 0.5
    determines the label (the last match wins if several overlap).

    Args:
        qname: name of the special q-point: 'Gamma', 'X', 'M' or 'R'.
        phdisp: length-15 (complex or real) displacement vector ordered as
            (Ax, Ay, Az, Bx, By, Bz, O1x, ..., O3z) for the 5 atoms of the
            ABO3 unit cell.
        masses: sequence of the 5 atomic masses, in the same atom order.
        notation: 'Cowley' returns the Cowley-style label; anything else
            (default 'IR') returns the translated irreducible-
            representation label (e.g. '$X_5^+$').

    Returns:
        The label string, or None when no reference pattern overlaps
        sufficiently.  NOTE: some Gamma translations are still empty
        strings (see the TODOs below).
    """
    # Component order of the 15-dim displacement vector: one (x, y, z)
    # triplet per atom, atoms ordered A, B, O1, O2, O3.
    nmode = namedtuple('nmode', [
        'Ax', 'Ay', 'Az', 'Bx', 'By', 'Bz', 'O1x', 'O1y', 'O1z', 'O2x', 'O2y',
        'O2z', 'O3x', 'O3y', 'O3z'
    ])
    IR_dict = {}
    # Translation tables: Cowley-style label -> IR label, per q-point.
    IR_translation = {}
    IR_translation['Gamma'] = {
        '$\Delta_1$': r'$\Gamma_4^-$',
        '$\Delta_2$': r'',  # TODO: translation missing
        '$\Delta_5$': r'',  # TODO: translation missing
    }
    IR_translation['R'] = {
        r'$\Gamma_2\prime$': '$R_2^-$',
        r'$\Gamma_{12}\prime$': '$R_3^-$',
        r'$\Gamma_{25}$': '$R_5^-$',
        r'$\Gamma_{25}\prime$': '$R_5^+$',
        r'$\Gamma_{15}$': '$R_4^-$',
    }
    IR_translation['X'] = {
        '$M_1$': '$X_1^+$',
        '$M_2\prime$': '$X_3^-$',
        '$M_3$': '$X_2^+$',
        '$M_5$': '$X_5^+$',
        '$M_5\prime$': '$X_5^-$',
    }
    IR_translation['M'] = {
        '$M_1$': '$M_1^+$',
        '$M_2$': '$M_3^+$',
        '$M_3$': '$M_2^+$',
        '$M_4$': '$M_4^+$',
        '$M_2\prime$': '$M_3^-$',
        '$M_3\prime$': '$M_2^-$',
        '$M_5$': '$M_5^+$',
        '$M_5\prime$': '$M_5^-$',
    }
    # All reference patterns start from the 15-component zero vector and
    # set only the displaced components (namedtuples are immutable, so
    # _replace always returns a fresh pattern).
    zvec = nmode._make([0.0] * 15)

    # --- Gamma point reference modes ---
    D1_1 = zvec._replace(Ay=1)
    D1_2 = zvec._replace(By=1)
    D1_3 = zvec._replace(O3y=1)
    D1_4 = zvec._replace(O1y=1, O2y=1)
    D2 = zvec._replace(O1y=1, O2y=-1)
    D5_1 = zvec._replace(Ax=1)
    D5_2 = zvec._replace(Bx=1)
    D5_3 = zvec._replace(O1x=1)
    D5_4 = zvec._replace(O2x=1)
    D5_5 = zvec._replace(O3x=1)
    D5_6 = zvec._replace(Az=1)
    D5_7 = zvec._replace(Bz=1)
    D5_8 = zvec._replace(O1z=1)
    D5_9 = zvec._replace(O2z=1)
    D5_10 = zvec._replace(O3z=1)
    IR_dict['Gamma'] = {
        D1_1: '$\Delta_1$',
        D1_2: '$\Delta_1$',
        D1_3: '$\Delta_1$',
        D1_4: '$\Delta_1$',
        D2: '$\Delta_2$',
        D5_1: '$\Delta_5$',
        D5_2: '$\Delta_5$',
        D5_3: '$\Delta_5$',
        D5_4: '$\Delta_5$',
        D5_5: '$\Delta_5$',
        D5_6: '$\Delta_5$',
        D5_7: '$\Delta_5$',
        D5_8: '$\Delta_5$',
        D5_9: '$\Delta_5$',
        D5_10: '$\Delta_5$',
    }

    # --- X point reference modes ---
    X1_1 = zvec._replace(By=1)
    X1_2 = zvec._replace(O1y=1, O2y=1)
    X2p_1 = zvec._replace(Ay=1)
    X2p_2 = zvec._replace(O3y=1)
    X3 = zvec._replace(O1y=1, O2y=-1)
    X5_1 = zvec._replace(Bx=1)
    X5_2 = zvec._replace(Bz=1)
    X5_3 = zvec._replace(O1x=1)
    X5_4 = zvec._replace(O1z=1)
    X5_5 = zvec._replace(O2x=1)
    X5_6 = zvec._replace(O2z=1)
    # BUGFIX: these four patterns were previously derived from X5_1..X5_4
    # (e.g. ``X5p_1 = X5_1._replace(Ax=1)``), which left the X5 component
    # (Bx=1, Bz=1, O1x=1, O1z=1) inside the X5' patterns.  A pure X5 mode
    # then also overlapped an X5' pattern (p = 1/sqrt(2) > 0.5) and could
    # be mislabeled.  All patterns are now built from the zero vector.
    X5p_1 = zvec._replace(Ax=1)
    X5p_2 = zvec._replace(Az=1)
    X5p_3 = zvec._replace(O3x=1)
    X5p_4 = zvec._replace(O3z=1)
    IR_dict['X'] = {
        X1_1: '$M_1$',
        X1_2: '$M_1$',
        X2p_1: '$M_2\prime$',
        X2p_2: '$M_2\prime$',
        X3: '$M_3$',
        X5_1: '$M_5$',
        X5_2: '$M_5$',
        X5_3: '$M_5$',
        X5_4: '$M_5$',
        X5_5: '$M_5$',
        X5_6: '$M_5$',
        X5p_1: '$M_5\prime$',
        X5p_2: '$M_5\prime$',
        X5p_3: '$M_5\prime$',
        X5p_4: '$M_5\prime$',
    }

    # --- M point reference modes ---
    M1 = zvec._replace(O3x=1, O2y=1)
    M2 = zvec._replace(O2x=1, O3y=-1)
    M3 = zvec._replace(O3x=1, O2y=-1)
    M4 = zvec._replace(O2x=1, O3y=1)
    M2p = zvec._replace(Az=1)
    M3p_1 = zvec._replace(Bz=1)
    M3p_2 = zvec._replace(O1z=1)
    M5_1 = zvec._replace(O3z=1)
    M5_2 = zvec._replace(O2z=1)
    M5p_1 = zvec._replace(Bx=1)
    M5p_2 = zvec._replace(By=1)
    M5p_3 = zvec._replace(Ay=1)
    M5p_4 = zvec._replace(Ax=1)
    M5p_5 = zvec._replace(O1x=1)
    M5p_6 = zvec._replace(O1y=1)
    IR_dict['M'] = {
        M1: '$M_1$',
        M2: '$M_2$',
        M3: '$M_3$',
        M4: '$M_4$',
        M2p: '$M_2\prime$',
        M3p_1: '$M_3\prime$',
        M3p_2: '$M_3\prime$',
        M5_1: '$M_5$',
        M5_2: '$M_5$',
        M5p_1: '$M_5\prime$',
        M5p_2: '$M_5\prime$',
        M5p_3: '$M_5\prime$',
        M5p_4: '$M_5\prime$',
        M5p_5: '$M_5\prime$',
        M5p_6: '$M_5\prime$',
    }

    # --- R point reference modes ---
    G2p = zvec._replace(O1z=1, O2x=1, O3y=1)
    G12p_1 = zvec._replace(O1z=1, O3y=1, O2x=-2)
    G12p_2 = zvec._replace(O1z=1, O3y=-1)
    G25_1 = zvec._replace(O1y=1, O3z=-1)
    G25_2 = zvec._replace(O1x=1, O2z=-1)
    G25_3 = zvec._replace(O3x=1, O2y=-1)
    G25p_1 = zvec._replace(Bx=1)
    G25p_2 = zvec._replace(By=1)
    G25p_3 = zvec._replace(Bz=1)
    G15_1 = zvec._replace(Ax=1)
    G15_2 = zvec._replace(Ay=1)
    G15_3 = zvec._replace(Az=1)
    G15_4 = zvec._replace(O1y=1, O3z=1)
    G15_5 = zvec._replace(O1x=1, O2z=1)
    G15_6 = zvec._replace(O3x=1, O2y=1)
    IR_dict['R'] = {
        G2p: r'$\Gamma_2\prime$',
        G12p_1: r'$\Gamma_{12}\prime$',
        G12p_2: r'$\Gamma_{12}\prime$',
        G25_1: r'$\Gamma_{25}$',
        G25_2: r'$\Gamma_{25}$',
        G25_3: r'$\Gamma_{25}$',
        G25p_1: r'$\Gamma_{25}\prime$',
        G25p_2: r'$\Gamma_{25}\prime$',
        G25p_3: r'$\Gamma_{25}\prime$',
        G15_1: r'$\Gamma_{15}$',
        G15_2: r'$\Gamma_{15}$',
        G15_3: r'$\Gamma_{15}$',
        G15_4: r'$\Gamma_{15}$',
        G15_5: r'$\Gamma_{15}$',
        G15_6: r'$\Gamma_{15}$',
    }

    # Mass-weight the displacement, keep the real part, and normalize.
    evec = np.array(phdisp) * np.sqrt(np.kron(masses, [1, 1, 1]))
    evec = np.real(evec) / np.linalg.norm(evec)
    mode = None
    for m in IR_dict[qname]:
        # Normalize the reference pattern and project the eigenvector on it.
        mvec = np.real(m)
        mvec = mvec / np.linalg.norm(mvec)
        p = np.abs(np.dot(np.real(evec), mvec))
        if p > 0.5:  # 1/sqrt(2) would be the tightest unambiguous bound
            print("-------------")
            print("Found! p= %s" % p)
            print("eigen vector: %s" % (nmode._make(mvec),))
            if notation == 'Cowley':
                mode = IR_dict[qname][m]
            else:
                print(IR_translation[qname])
                mode = IR_translation[qname][IR_dict[qname][m]]
            print("mode: %s %s" % (mode, m))
    if mode is None:
        # No reference pattern matched; dump the eigenvector for inspection.
        print("==============")
        print("eigen vector: %s" % (nmode._make(evec),))
    return mode
def fix_gamma(qpoints, phfreqs):
    """Patch the phonon frequencies at the Gamma point.

    Each q-point that is (numerically) Gamma gets its frequency row
    overwritten with the row of a neighboring q-point (the next one when
    Gamma is first, otherwise the previous one), hiding the ill-behaved
    values exactly at q=0.

    Args:
        qpoints: array of shape (nq, 3) with reduced q-point coordinates.
        phfreqs: array of shape (nq, nmodes); modified in place.

    Returns:
        The (in-place modified) phfreqs array.
    """
    # Fix: the original used the Python-2-only ``print`` statement, which is
    # a syntax error under Python 3; the function form behaves identically.
    n_qpoints = len(qpoints)
    for i, qpt in enumerate(qpoints):
        if np.isclose(qpt, [0.0, 0.0, 0.0], rtol=1e-5, atol=1e-3).all():
            print("Fix")
            if n_qpoints < 2:
                # Only one q-point: nothing to copy from (previously an
                # IndexError); leave the row untouched.
                continue
            if i == 0:
                phfreqs[i] = phfreqs[i + 1]
            else:
                phfreqs[i] = phfreqs[i - 1]
    return phfreqs
def get_weight(disp, masses):
    """Return the fractional mass-weighted contribution of the first,
    second and remaining atoms to a phonon displacement.

    Args:
        disp: array of shape (3 * natoms, 2); column 0 is the real and
            column 1 the imaginary part of each Cartesian component.
        masses: sequence of natoms atomic masses.

    Returns:
        (wA, wB, wC): weights of components 0-2, 3-5 and 6-end,
        normalized so that wA + wB + wC == 1.
    """
    # One mass entry per Cartesian component.
    mass_per_component = np.kron(masses, [1, 1, 1])
    cdisp = np.array(disp)
    cdisp = cdisp[:, 0] + 1j * cdisp[:, 1]
    # |u_i|^2 * m_i for every component.
    contrib = np.real(np.conj(cdisp) * cdisp * mass_per_component)
    total = sum(contrib)
    weight_A = sum(contrib[0:3]) / total
    weight_B = sum(contrib[3:6]) / total
    weight_C = sum(contrib[6:]) / total
    return weight_A, weight_B, weight_C
def test_read_freq_nc():
    """Smoke test: read and plot the BaTiO3 example PHBST file.

    Requires 'BaTiO3/abinit_ifc.out_PHBST.nc' to exist relative to the
    working directory.
    """
    fname = 'BaTiO3/abinit_ifc.out_PHBST.nc'
    # Fix: the original called ``read_phon_freq_nc``, which is not defined
    # anywhere in this module; the reader/plotter is plot_phon_from_nc.
    plot_phon_from_nc(fname)
def plot_band_weight(kslist,
                     ekslist,
                     wkslist=None,
                     efermi=0,
                     yrange=None,
                     output=None,
                     style='alpha',
                     color='blue',
                     axis=None,
                     width=10,
                     xticks=None,
                     title=None):
    """Draw a "fat band" plot: each band segment is emphasized according to
    its weight, either via line width or via color opacity.

    Args:
        kslist: list of x-coordinate arrays, one per band.
        ekslist: list of energy/frequency arrays, one per band.
        wkslist: list of per-point weight arrays matching ekslist; when
            None, no weighted line collections are drawn.
        efermi: when not None a dashed horizontal line is drawn.
            NOTE(review): the line is always at y=0 regardless of the
            value -- presumably y=efermi was intended; confirm.
        yrange: (ymin, ymax); defaults to the data range padded by 66.
        output: unused parameter (NOTE(review): dead argument).
        style: 'width' scales the line width by the weight; 'alpha' scales
            the opacity.  NOTE(review): any other value leaves ``lc``
            unbound and raises NameError.
        color: base color of the weighted lines.
        axis: existing matplotlib axis to draw into; a new figure is
            created when None.
        width: scale factor applied to the weights.
        xticks: (labels, positions) pair; vertical gray guide lines are
            drawn at the positions.
        title: axis title, when given.

    Returns:
        The matplotlib axis drawn into.
    """
    if axis is None:
        # No axis supplied: create a fresh figure with tightened margins.
        fig, a = plt.subplots()
        plt.tight_layout(pad=2.19)
        plt.axis('tight')
        plt.gcf().subplots_adjust(left=0.17)
    else:
        a = axis
    if title is not None:
        a.set_title(title)
    xmax = max(kslist[0])
    if yrange is None:
        # Pad the automatic y-range by a fixed margin of 66 (data units).
        yrange = (np.array(ekslist).flatten().min() - 66,
                  np.array(ekslist).flatten().max() + 66)
    if wkslist is not None:
        for i in range(len(kslist)):
            x = kslist[i]
            y = ekslist[i]
            lwidths = np.array(wkslist[i]) * width
            #lwidths=np.ones(len(x))
            # Build per-segment coordinates: consecutive point pairs.
            points = np.array([x, y]).T.reshape(-1, 1, 2)
            segments = np.concatenate([points[:-1], points[1:]], axis=1)
            if style == 'width':
                # Encode the weight in the line width.
                lc = LineCollection(segments, linewidths=lwidths, colors=color)
            elif style == 'alpha':
                # Encode the weight in the opacity; +0.001 avoids division
                # by zero when width == 0.
                lc = LineCollection(
                    segments,
                    linewidths=[2] * len(x),
                    colors=[
                        colorConverter.to_rgba(
                            color, alpha=lwidth / (width + 0.001))
                        for lwidth in lwidths
                    ])
            a.add_collection(lc)
    plt.ylabel('Frequency (cm$^{-1}$)')
    if axis is None:
        # Thin gray reference lines for the raw bands on a fresh figure.
        for ks, eks in zip(kslist, ekslist):
            plt.plot(ks, eks, color='gray', linewidth=0.1)
    a.set_xlim(0, xmax)
    a.set_ylim(yrange)
    if xticks is not None:
        plt.xticks(xticks[1], xticks[0])
        for x in xticks[1]:
            plt.axvline(x, color='gray', linewidth=0.5)
    if efermi is not None:
        plt.axhline(linestyle='--', color='black')
    return a
#plot_phon_from_nc(
# 'BaTiO3/abinit_ifc.out_PHBST.nc',
# title='BaTiO3',
# output_filename='phonon.png')
| mailhexu/pyDFTutils | pyDFTutils/perovskite/plotphon.py | Python | lgpl-3.0 | 14,848 | [
"ASE"
] | 7b6dab3253275aaf8d04e811c7c07e19465897e8893fcef0c1f224c18bc7b92b |
# tasks.py (contains classes for various playfield devices)
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
from copy import copy
import time
class Task(object):
"""A task/coroutine implementation.
Tasks are similar to timers except they can yield back to the main loop
at any point, then be resumed later.
To wait from a Task, do `yield <ms>`, e.g. `yield 200`.
To exit from a Task, just return. This will raise a StopIteration
exception which the scheduler will catch and remove the task from the
run queue.
"""
Tasks = set()
NewTasks = set()
def __init__(self, callback, args=None, name=None, sleep=0):
self.callback = callback
self.args = args
self.wakeup = None
self.name = name
self.gen = None
if sleep:
self.wakeup = time.time() + sleep
def restart(self):
"""Restarts the task."""
self.wakeup = None
self.gen = None
def stop(self):
"""Stops the task.
This causes it not to run any longer, by removing it from the task set
and then deleting it."""
Task.Tasks.remove(self)
def __str__(self):
return "callback=" + str(self.callback) + " wakeup=" + str(self.wakeup)
@staticmethod
def Create(callback, args=tuple(), sleep=0):
"""Creates a new task and insert it into the runnable set."""
task = Task(callback=callback, args=args, sleep=sleep)
Task.NewTasks.add(task)
return task
@staticmethod
def timer_tick():
"""Scans all tasks now and run those that are ready."""
dead_tasks = []
for task in Task.Tasks:
if not task.wakeup or task.wakeup <= time.time():
if task.gen:
try:
rc = next(task.gen)
if rc:
task.wakeup = time.time() + rc
except StopIteration:
dead_tasks.append(task)
else:
task.wakeup = time.time()
task.gen = task.callback(*task.args)
for task in dead_tasks:
Task.Tasks.remove(task)
# We need to queue the addition to new tasks to the set because if we
# get a new task while we're iterating above then our set size will
# change while iterating and produce an error.
for task in Task.NewTasks:
Task.Tasks.add(task)
Task.NewTasks = set()
class DelayManager(object):
"""Parent class for a delay manager which can manage multiple delays."""
delay_managers = set()
dead_delay_managers = set()
# todo it might not make sense to keep each DelayManager as a separate
# class instance. It makes iterating complex and doesn't really add any
# value? (Well, apart from it's easy to wipe all the delays that a single
# module created.) But it might be faster to just have a single delay
# manager for the whole system. Then again, we're only iterating at a
# relatively slow loop rate.
def __init__(self):
self.log = logging.getLogger("DelayManager")
self.delays = {}
DelayManager.delay_managers.add(self)
def __del__(self):
DelayManager.dead_delay_managers.add(self) # todo I don't like this
def add(self, name, ms, callback, **kwargs):
"""Adds a delay.
Args:
name: String name of this delay. This name is arbitrary and only
used to identify the delay later if you want to remove or change
it.
ms: Int of the number of milliseconds you want this delay to be for.
Note that the resolution of this time is based on your
machine's tick rate. The callback will be called on the
first machine tick *after* the delay time has expired. For
example, if you have a machine tick rate of 30Hz, that's 33.33ms
per tick. So if you set a delay for 40ms, the actual delay will
be 66.66ms since that's the next tick time after the delay ends.
callback: The method that is called when this delay ends.
**kwargs: Any other (optional) kwarg pairs you pass will be
passed along as kwargs to the callback method.
"""
self.log.debug("Adding delay. Name: '%s' ms: %s, callback: %s, "
"kwargs: %s", name, ms, callback, kwargs)
self.delays[name] = ({'action_ms': time.time() + (ms / 1000.0),
'callback': callback,
'kwargs': kwargs})
def remove(self, name):
"""Removes a delay. (i.e. prevents the callback from being fired and
cancels the delay.)
Args:
name: String name of the delay you want to remove. If there is no
delay with this name, that's ok. Nothing happens.
"""
self.log.debug("Removing delay: '%s'", name)
try:
del self.delays[name]
except:
pass
def check(self, delay):
"""Checks to see if a delay exists.
Args:
delay: A string of the delay you're checking for.
Returns: The delay object if it exists, or None if not.
"""
if delay in self.delays:
return delay
def reset(self, name, ms, callback, **kwargs):
"""Resets a delay, first deleting the old one (if it exists) and then
adding new delay with the new settings.
Args:
same as add()
"""
self.remove(name)
self.add(name, ms, callback, **kwargs)
def clear(self):
"""Removes (clears) all the delays associated with this DelayManager."""
self.delays = {}
def _process_delays(self):
# Processes any delays that should fire now
for delay in self.delays.keys():
if self.delays[delay]['action_ms'] <= time.time():
# Delete the delay first in case the processing of it adds a
# new delay with the same name. If we delete as the final step
# then we'll inadvertantly delete the newly-set delay
this_delay = copy(self.delays[delay])
del self.delays[delay]
self.log.debug("---Processing delay: %s", this_delay)
if this_delay['kwargs']:
this_delay['callback'](**this_delay['kwargs'])
else:
this_delay['callback']()
@staticmethod
def timer_tick():
# This is kind of complex because we have to account for a delay
# manager being deleted while we're iterating.
live_delay_managers = set()
while DelayManager.delay_managers:
i = DelayManager.delay_managers.pop()
if i not in DelayManager.dead_delay_managers:
i._process_delays()
live_delay_managers.add(i)
DelayManager.delay_managers = live_delay_managers
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
| jabdoa2/mpf | mpf/system/tasks.py | Python | mit | 8,393 | [
"Brian"
] | bc734102cf9fc867bfb04c6297d45d24958829a97e58c3c431107d7ffe86acd3 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import os.path
import re
import sys
import warnings
from collections import defaultdict
try:
from setuptools import setup, find_packages
from setuptools.command.build_py import build_py as BuildPy
from setuptools.command.install_lib import install_lib as InstallLib
from setuptools.command.install_scripts import install_scripts as InstallScripts
except ImportError:
print("Ansible now needs setuptools in order to build. Install it using"
" your package manager (usually python-setuptools) or via pip (pip"
" install setuptools).", file=sys.stderr)
sys.exit(1)
# `distutils` must be imported after `setuptools` or it will cause explosions
# with `setuptools >=48.0.0, <49.1`.
# Refs:
# * https://github.com/ansible/ansible/issues/70456
# * https://github.com/pypa/setuptools/issues/2230
# * https://github.com/pypa/setuptools/commit/bd110264
from distutils.command.build_scripts import build_scripts as BuildScripts
from distutils.command.sdist import sdist as SDist
def find_package_info(*file_paths):
try:
with open(os.path.join(*file_paths), 'r') as f:
info_file = f.read()
except Exception:
raise RuntimeError("Unable to find package info.")
# The version line must have the form
# __version__ = 'ver'
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
info_file, re.M)
author_match = re.search(r"^__author__ = ['\"]([^'\"]*)['\"]",
info_file, re.M)
if version_match and author_match:
return version_match.group(1), author_match.group(1)
raise RuntimeError("Unable to find package info.")
def _validate_install_ansible_core():
"""Validate that we can install ansible-core. This checks if
ansible<=2.9 or ansible-base>=2.10 are installed.
"""
# Skip common commands we can ignore
# Do NOT add bdist_wheel here, we don't ship wheels
# and bdist_wheel is the only place we can prevent pip
# from installing, as pip creates a wheel, and installs the wheel
# and we have no influence over installation within a wheel
if set(('sdist', 'egg_info')).intersection(sys.argv):
return
if os.getenv('ANSIBLE_SKIP_CONFLICT_CHECK', '') not in ('', '0'):
return
# Save these for later restoring things to pre invocation
sys_modules = sys.modules.copy()
sys_modules_keys = set(sys_modules)
# Make sure `lib` isn't in `sys.path` that could confuse this
sys_path = sys.path[:]
abspath = os.path.abspath
sys.path[:] = [p for p in sys.path if abspath(p) != abspath('lib')]
try:
from ansible.release import __version__
except ImportError:
pass
else:
version_tuple = tuple(int(v) for v in __version__.split('.')[:2])
if version_tuple >= (2, 11):
return
elif version_tuple == (2, 10):
ansible_name = 'ansible-base'
else:
ansible_name = 'ansible'
stars = '*' * 76
raise RuntimeError(
'''
%s
Cannot install ansible-core with a pre-existing %s==%s
installation.
Installing ansible-core with ansible-2.9 or older, or ansible-base-2.10
currently installed with pip is known to cause problems. Please uninstall
%s and install the new version:
pip uninstall %s
pip install ansible-core
If you want to skip the conflict checks and manually resolve any issues
afterwards, set the ANSIBLE_SKIP_CONFLICT_CHECK environment variable:
ANSIBLE_SKIP_CONFLICT_CHECK=1 pip install ansible-core
%s
''' % (stars, ansible_name, __version__, ansible_name, ansible_name, stars))
finally:
sys.path[:] = sys_path
for key in sys_modules_keys.symmetric_difference(sys.modules):
sys.modules.pop(key, None)
sys.modules.update(sys_modules)
_validate_install_ansible_core()
SYMLINK_CACHE = 'SYMLINK_CACHE.json'
def _find_symlinks(topdir, extension=''):
    """Find symlinks that should be maintained

    Maintained symlinks exist in the bin dir or are modules which have
    aliases.  Our heuristic is that they are a link in a certain path which
    point to a file in the same directory.

    .. warn::

        We want the symlinks in :file:`bin/` that link into :file:`lib/ansible/*` (currently,
        :command:`ansible`, :command:`ansible-test`, and :command:`ansible-connection`) to become
        real files on install.  Updates to the heuristic here *must not* add them to the symlink
        cache.

    :arg topdir: directory tree to walk looking for symlinks
    :kwarg extension: only record links whose filename ends with this suffix
    :returns: mapping of link target -> list of link paths relative to topdir
    """
    symlinks = defaultdict(list)
    for base_path, dirs, files in os.walk(topdir):
        for filename in files:
            filepath = os.path.join(base_path, filename)
            if os.path.islink(filepath) and filename.endswith(extension):
                target = os.readlink(filepath)
                if target.startswith('/'):
                    # We do not support absolute symlinks at all
                    continue
                if os.path.dirname(target) == '':
                    # Target is in the same directory: record it keyed by the
                    # bare target filename, link path relative to topdir.
                    link = filepath[len(topdir):]
                    if link.startswith('/'):
                        link = link[1:]
                    symlinks[os.path.basename(target)].append(link)
                else:
                    # Count how many directory levels from the topdir we are
                    levels_deep = os.path.dirname(filepath).count('/')
                    # Count the number of directory levels higher we walk up the tree in target
                    target_depth = 0
                    for path_component in target.split('/'):
                        if path_component == '..':
                            target_depth += 1
                            # If we walk past the topdir, then don't store
                            if target_depth >= levels_deep:
                                break
                        else:
                            # A non-'..' component walks back down one level.
                            target_depth -= 1
                    else:
                        # If we managed to stay within the tree, store the symlink
                        link = filepath[len(topdir):]
                        if link.startswith('/'):
                            link = link[1:]
                        symlinks[target].append(link)
    return symlinks
def _cache_symlinks(symlink_data):
    """Persist the symlink map to SYMLINK_CACHE as JSON."""
    with open(SYMLINK_CACHE, 'w') as cache_file:
        cache_file.write(json.dumps(symlink_data))
def _maintain_symlinks(symlink_type, base_path):
    """Switch a real file into a symlink

    :arg symlink_type: which cached link set to recreate ('script' or
        'library')
    :arg base_path: directory under which the links are (re)created
    :raises RuntimeError: if no cache exists and the expected symlinks are
        not present in the working tree (i.e. probably not a git checkout)
    """
    try:
        # Try the cache first because going from git checkout to sdist is the
        # only time we know that we're going to cache correctly
        with open(SYMLINK_CACHE, 'r') as f:
            symlink_data = json.load(f)
    except (IOError, OSError) as e:
        # IOError on py2, OSError on py3. Both have errno
        if e.errno == 2:
            # SYMLINKS_CACHE doesn't exist. Fallback to trying to create the
            # cache now. Will work if we're running directly from a git
            # checkout or from an sdist created earlier.
            library_symlinks = _find_symlinks('lib', '.py')
            library_symlinks.update(_find_symlinks('test/lib'))
            symlink_data = {'script': _find_symlinks('bin'),
                            'library': library_symlinks,
                            }
            # Sanity check that something we know should be a symlink was
            # found. We'll take that to mean that the current directory
            # structure properly reflects symlinks in the git repo
            if 'ansible-playbook' in symlink_data['script']['ansible']:
                _cache_symlinks(symlink_data)
            else:
                raise RuntimeError(
                    "Pregenerated symlink list was not present and expected "
                    "symlinks in ./bin were missing or broken. "
                    "Perhaps this isn't a git checkout?"
                )
        else:
            raise
    symlinks = symlink_data[symlink_type]
    for source in symlinks:
        for dest in symlinks[source]:
            dest_path = os.path.join(base_path, dest)
            # Only replace real files; existing symlinks are left untouched.
            if not os.path.islink(dest_path):
                try:
                    os.unlink(dest_path)
                except OSError as e:
                    if e.errno == 2:
                        # File does not exist which is all we wanted
                        pass
                os.symlink(source, dest_path)
class BuildPyCommand(BuildPy):
    """build_py that re-creates library symlinks in the build tree."""

    def run(self):
        super(BuildPyCommand, self).run()
        _maintain_symlinks('library', self.build_lib)
class BuildScriptsCommand(BuildScripts):
    """build_scripts that re-creates script symlinks in the build dir."""

    def run(self):
        super(BuildScriptsCommand, self).run()
        _maintain_symlinks('script', self.build_dir)
class InstallLibCommand(InstallLib):
    """install_lib that re-creates library symlinks in the install dir."""

    def run(self):
        super(InstallLibCommand, self).run()
        _maintain_symlinks('library', self.install_dir)
class InstallScriptsCommand(InstallScripts):
    """install_scripts that re-creates script symlinks in the install dir."""

    def run(self):
        super(InstallScriptsCommand, self).run()
        _maintain_symlinks('script', self.install_dir)
class SDistCommand(SDist):
    """sdist that records the git checkout's symlink layout before packing.

    The cache must be written *before* SDist.run() so the generated
    tarball carries SYMLINK_CACHE.json for later build/install steps.
    """
    def run(self):
        # have to generate the cache of symlinks for release as sdist is the
        # only command that has access to symlinks from the git repo
        library_symlinks = _find_symlinks('lib', '.py')
        library_symlinks.update(_find_symlinks('test/lib'))
        symlinks = {'script': _find_symlinks('bin'),
                    'library': library_symlinks,
                    }
        _cache_symlinks(symlinks)
        SDist.run(self)
        # Print warnings at the end because no one will see warnings before all the normal status
        # output
        if os.environ.get('_ANSIBLE_SDIST_FROM_MAKEFILE', False) != '1':
            warnings.warn('When setup.py sdist is run from outside of the Makefile,'
                          ' the generated tarball may be incomplete. Use `make snapshot`'
                          ' to create a tarball from an arbitrary checkout or use'
                          ' `cd packaging/release && make release version=[..]` for official builds.',
                          RuntimeWarning)
def read_file(file_name):
    """Return the entire text content of *file_name*."""
    with open(file_name, 'r') as fh:
        return fh.read()
def read_requirements(file_name):
    """Read requirements file as a list.

    :arg file_name: path to a pip requirements file
    :returns: list of requirement lines (may include comments/blank lines)
    :raises RuntimeError: if the file has no content
    """
    reqs = read_file(file_name).splitlines()
    if not reqs:
        # Fix: the two message fragments previously concatenated with no
        # separator ("...fileThat indicates...").
        raise RuntimeError(
            "Unable to read requirements from the %s file. "
            "That indicates this copy of the source code is incomplete."
            % file_name
        )
    return reqs
def get_dynamic_setup_params():
    """Compute the setup() arguments that must be read from disk at build time."""
    dynamic_params = {
        # Retrieve the long description from the README
        'long_description': read_file('README.rst'),
        'install_requires': read_requirements('requirements.txt'),
    }
    return dynamic_params
# Directory containing this setup.py.
here = os.path.abspath(os.path.dirname(__file__))
# Version/author are read from lib/ansible/release.py (single source of truth).
__version__, __author__ = find_package_info(here, 'lib', 'ansible', 'release.py')
static_setup_params = dict(
    # Use the distutils SDist so that symlinks are not expanded
    # Use a custom Build for the same reason
    cmdclass={
        'build_py': BuildPyCommand,
        'build_scripts': BuildScriptsCommand,
        'install_lib': InstallLibCommand,
        'install_scripts': InstallScriptsCommand,
        'sdist': SDistCommand,
    },
    name='ansible-core',
    version=__version__,
    description='Radically simple IT automation',
    author=__author__,
    author_email='info@ansible.com',
    url='https://ansible.com/',
    project_urls={
        'Bug Tracker': 'https://github.com/ansible/ansible/issues',
        'CI: Azure Pipelines': 'https://dev.azure.com/ansible/ansible/',
        'Code of Conduct': 'https://docs.ansible.com/ansible/latest/community/code_of_conduct.html',
        'Documentation': 'https://docs.ansible.com/ansible/',
        'Mailing lists': 'https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information',
        'Source Code': 'https://github.com/ansible/ansible',
    },
    license='GPLv3+',
    # Ansible will also make use of a system copy of python-six and
    # python-selectors2 if installed but use a Bundled copy if it's not.
    python_requires='>=3.8',
    # Packages live under lib/ and test/lib/ rather than the repo root.
    package_dir={'': 'lib',
                 'ansible_test': 'test/lib/ansible_test'},
    packages=find_packages('lib') + find_packages('test/lib'),
    include_package_data=True,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Natural Language :: English',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Topic :: System :: Installation/Setup',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
    ],
    scripts=[
        'bin/ansible',
        'bin/ansible-playbook',
        'bin/ansible-pull',
        'bin/ansible-doc',
        'bin/ansible-galaxy',
        'bin/ansible-console',
        'bin/ansible-connection',
        'bin/ansible-vault',
        'bin/ansible-config',
        'bin/ansible-inventory',
        'bin/ansible-test',
    ],
    data_files=[],
    # Installing as zip files would break due to references to __file__
    zip_safe=False
)
def main():
    """Invoke installation process using setuptools."""
    setup_params = {**static_setup_params, **get_dynamic_setup_params()}
    # Silence "unknown distribution option" UserWarnings for options that
    # old distutils versions do not recognize; restore afterwards.
    unknown_option_regex = (
        r"Unknown distribution option: '(project_urls|python_requires)'"
    )
    warnings.filterwarnings(
        'ignore',
        message=unknown_option_regex,
        category=UserWarning,
        module='distutils.dist',
    )
    setup(**setup_params)
    warnings.resetwarnings()


if __name__ == '__main__':
    main()
| abadger/ansible | setup.py | Python | gpl-3.0 | 14,340 | [
"Galaxy"
] | b15f4a1620f20583460955d7a158029332cee914b1a7bb24024cdbd1284a73b7 |
# ***************************************************************************
# *
# * Copyright (C) 2013-2016 University of Dundee
# * All rights reserved.
# *
# * This file is part of SAMoS (Soft Active Matter on Surfaces) program.
# *
# * SAMoS is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * SAMoS is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program. If not, see <http://www.gnu.org/licenses/>.
# *
# *****************************************************************************
from read_param import *
import numpy as np
# Given importance of radii et al, we have to pass Interaction the information about
# radii (and positions)? Alternatively, pass it just the unchanging particle labels
class Interaction:
    """Evaluate pairwise interactions: force, energy, stress and stiffness.

    Wraps the pair potential selected in the parameter object.  Particle
    radii set the contact distance for each pair; ``sigma`` (the mean
    radius) sets the default neighbour cutoff ``dmax``.

    Only single-type systems (or ``ignore=True``) are implemented.  The
    'soft', 'soft_attractive' and 'morse' potentials are functional, while
    'gaussian' and 'rod' remain warning-only stubs.
    """

    def __init__(self, param, radius, ignore=False, debug=False):
        """Unpack potential parameters.

        param -- parameter object (see read_param); must provide ``ntypes``,
                 ``potential`` and the ``pot_params`` dict
        radius -- per-particle radii (array-like)
        ignore -- treat the system as single-type even when ntypes > 1
        debug -- stored flag; not used by this class
        """
        self.param = param
        self.radius = radius
        self.sigma = np.mean(self.radius)
        self.ignore = ignore
        self.debug = debug
        # Unpack parameters depending on particle types and potential kind.
        if (self.param.ntypes == 1) or self.ignore:
            if self.param.potential == 'soft':
                self.k = self.param.pot_params['k']
                self.dmax = 2 * self.sigma
                self.mult = 1
            elif self.param.potential == 'soft_attractive':
                self.k = self.param.pot_params['k']
                self.fact = self.param.pot_params['re_fact'] - 1.0
                self.rmax = 1 + 2.0 * self.fact
                self.dmax = 2 * self.sigma
                self.mult = self.rmax
            elif self.param.potential == 'morse':
                self.D = self.param.pot_params['D']
                self.re = self.param.pot_params['re']
                # Fix: previously read self.param.pot.params['a'] (typo),
                # which raised AttributeError for every Morse run.
                self.a = self.param.pot_params['a']
                self.dmax = 4 * self.sigma
                self.mult = self.re
            elif self.param.potential == 'gaussian':
                # give it at least a default neighbour radius ...
                self.dmax = 2 * self.sigma
                # Fix: mult was never set on this branch, so getMult()
                # raised AttributeError; use the same default as 'rod'.
                self.mult = 1.0
                print("Warning! Gaussian interaction has not yet been implemented!")
            elif self.param.potential == 'rod':
                # CHANGE: Assuming here rods are at default aspect ratio 5
                self.dmax = 10 * self.sigma
                self.mult = 1.0
                print("Warning! Rod interaction has not yet been implemented!")
            else:
                # give it at least a default neighbour radius ...
                self.dmax = 2 * self.sigma
                self.mult = 1.0
        else:
            # give it at least a default neighbour radius ...
            self.dmax = 2 * self.sigma
            self.mult = 1.0
            print("Warning! Multiple types of particles interacting have not yet been implemented!")

    def getDmax(self):
        """Neighbour-search cutoff distance for this potential."""
        return self.dmax

    def getMult(self):
        """Multiplier on the contact distance defining the interaction range."""
        return self.mult

    def _soft_attractive_split(self, i, neighbours, dr):
        """Classify soft_attractive contacts and compute force prefactors.

        Shared by getForce/getEnergy/getStiffness (the computation was
        previously duplicated).  Returns (scale, diff, rep, att, factor):
        contact distances, overlaps, index lists for the repulsive and
        attractive regimes, and the signed force-magnitude prefactor.
        """
        scale = self.radius[i] + self.radius[neighbours]
        diff = scale - dr
        dscaled = diff / scale
        rep = [index for index, value in enumerate(dscaled) if value > -self.fact]
        att = [index for index, value in enumerate(dscaled) if value <= -self.fact]
        factor = np.empty((len(neighbours),))
        # repulsive ones
        factor[rep] = self.k * diff[rep]
        # attractive ones
        factor[att] = -self.k * (self.rmax * scale[att] - dr[att])
        return scale, diff, rep, att, factor

    # also is -gradient of the potential
    def getForce(self, i, neighbours, drvec, dr):
        """Force on particle *i* from each neighbour.

        i -- particle index
        neighbours -- array of neighbour indices
        drvec -- (n, 3) separation vectors
        dr -- (n,) separation distances
        Returns an (n, 3) array of force vectors (zeros for stub potentials).
        """
        Fvec = 0.0 * (np.array(neighbours).transpose() * (drvec).transpose()).transpose()
        if (self.param.ntypes == 1) or self.ignore:
            if self.param.potential == 'soft':
                # Linear spring on the overlap, directed along drvec.
                diff = self.radius[i] + self.radius[neighbours] - dr
                Fvec = self.k * ((diff / dr).transpose() * (drvec).transpose()).transpose()
                return Fvec
            elif self.param.potential == 'soft_attractive':
                scale, diff, rep, att, factor = self._soft_attractive_split(i, neighbours, dr)
                Fvec = ((factor / dr).transpose() * (drvec).transpose()).transpose()
                return Fvec
            elif self.param.potential == 'morse':
                fnorm = -2 * self.a * self.D * np.exp(-self.a * (dr - self.re)) * (1 - np.exp(-self.a * (dr - self.re)))
                Fvec = ((fnorm / dr).transpose() * (drvec).transpose()).transpose()
                return Fvec
            elif self.param.potential == 'gaussian':
                print("Warning! Gaussian interaction has not yet been implemented! Returning zero force")
                return Fvec
            elif self.param.potential == 'rod':
                print("Warning! Rod interaction has not yet been implemented! Returning zero force")
                return Fvec
            else:
                return Fvec
        else:
            print("Warning! Multiple types of particles interacting have not yet been implemented! Returning zero force")
            return Fvec

    def getStresses(self, i, neighbours, drvec, dr):
        """Per-contact virial stress tensors and their traces (pressures).

        Returns (press, stress): stress is (n, 3, 3); press is the trace
        over the last two axes for each neighbour.
        """
        stress = np.zeros((len(neighbours), 3, 3))
        # First get the forces:
        Fvec = self.getForce(i, neighbours, drvec, dr)
        for u in range(3):
            for v in range(3):
                stress[:, u, v] += 0.5 * drvec[:, u] * Fvec[:, v]
        # Then get the pressure as the trace over the last two components
        press = np.trace(stress, axis1=1, axis2=2)
        return press, stress

    def getEnergy(self, i, neighbours, drvec, dr):
        """Pair energy between particle *i* and each neighbour.

        Note: the 0.5 prefactor on each return avoids double counting,
        since every contact is visited once from each of its two particles.
        """
        if (self.param.ntypes == 1) or self.ignore:
            if self.param.potential == 'soft':
                diff = self.radius[i] + self.radius[neighbours] - dr
                eng_val = 0.5 * self.k * diff * diff
                return 0.5 * eng_val
            elif self.param.potential == 'soft_attractive':
                scale, diff, rep, att, factor = self._soft_attractive_split(i, neighbours, dr)
                eng_val = np.empty((len(neighbours),))
                eng_val[rep] = 0.5 * factor[rep] * diff[rep]
                # Attractive branch: mirrored parabola about the turning point.
                eng0 = 0.5 * self.k * (self.fact * scale[att]) ** 2
                eng_val[att] = eng0 + (eng0 - (factor[att] ** 2) / self.k)
                return 0.5 * eng_val
            elif self.param.potential == 'morse':
                eng_val = self.D * (1 - np.exp(-self.a * (dr - self.re))) ** 2
                return 0.5 * eng_val
            elif self.param.potential == 'gaussian':
                print("Warning! Gaussian interaction has not yet been implemented! Returning zero energy")
                return 0
            elif self.param.potential == 'rod':
                print("Warning! Rod interaction has not yet been implemented! Returning zero energy")
                return 0
            else:
                return 0
        else:
            print("Warning! Multiple types of particles interacting have not yet been implemented! Returning zero energy")
            return 0

    def getStiffness(self, i, neighbours, drvec, dr):
        """Second derivative (spring constant) of the pair potential."""
        if (self.param.ntypes == 1) or self.ignore:
            if self.param.potential == 'soft':
                return self.k * np.ones((len(neighbours),))
            elif self.param.potential == 'soft_attractive':
                scale, diff, rep, att, factor = self._soft_attractive_split(i, neighbours, dr)
                stiff = np.zeros((len(neighbours),))
                stiff[rep] = self.k
                stiff[att] = -self.k
                return stiff
            elif self.param.potential == 'morse':
                stiff = 2.0 * self.a ** 2 * self.D * np.exp(-self.a * (dr - self.re)) * (2.0 * np.exp(-self.a * (dr - self.re)) - 1)
                return stiff
            elif self.param.potential == 'gaussian':
                print("Warning! Gaussian interaction has not yet been implemented! Returning zero stiffness")
                return np.zeros((len(neighbours),))
            elif self.param.potential == 'rod':
                print("Warning! Rod interaction has not yet been implemented! Returning zero stiffness")
                return np.zeros((len(neighbours),))
            else:
                return np.zeros((len(neighbours),))
        else:
            print("Warning! Multiple types of particles interacting have not yet been implemented! Returning zero stiffness")
            # Fix: this branch previously fell through and returned None
            # despite the message; return zeros like the other stubs.
            return np.zeros((len(neighbours),))
"Gaussian"
] | 743d54825cda22cc2b6d6f65e0e198c772c0dfa3881476de4d50ee41257c389f |
from __future__ import print_function, division
import os,os.path, glob
import logging
import re
import json
from configobj import ConfigObj
try:
import numpy as np
except ImportError:
np = None
try:
import pandas as pd
except ImportError:
pd = None
import numpy.random as rand
import scipy.optimize
try:
import emcee
except ImportError:
emcee = None
try:
from plotutils.plotutils import setfig
except ImportError:
setfig = None
try:
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
except ImportError:
plt = None
gaussian_kde = None
try:
import triangle
except ImportError:
triangle = None
mnest_available = True
try:
import pymultinest
except ImportError:
logging.warning('PyMultiNest not available; only emcee fits will be possible.')
pymultinest = None
mnest_available = False
from .extinction import EXTINCTION
from .passbands import WEFF
class StarModel(object):
"""An object to represent a star, with observed properties, modeled by an Isochrone
This is used to fit a physical stellar model to observed
quantities, e.g. spectroscopic or photometric, based on
an :class:`Isochrone`. Parallax (in miliarcseconds) is
also accepted as an observed quantity.
Note that by default a local metallicity prior, based on SDSS data,
will be used when :func:`StarModel.fit` is called.
:param ic:
:class:`Isochrone` object used to model star.
:param maxAV: (optional)
Maximum allowed extinction (i.e. the extinction @ infinity in direction of star). Default is 1.
:param max_distance: (optional)
Maximum allowed distance (pc). Default is 3000.
:param use_emcee: (optional)
If set to true, then sampling done with emcee rather than MultiNest.
(not recommended unless you have very precise spectroscopic properties).
:param **kwargs:
Keyword arguments must be properties of given isochrone, e.g., logg,
feh, Teff, and/or magnitudes. The values represent measurements of
the star, and must be in (value,error) format. All such keyword
arguments will be held in ``self.properties``. ``parallax`` is
also a valid property, and should be provided in miliarcseconds.
"""
def __init__(self,ic,maxAV=1,max_distance=3000,
             use_emcee=False,
             min_logg=None, name='',
             **kwargs):
    """Store observed properties and fit configuration.

    See the class docstring for the argument meanings; ``**kwargs``
    holds the observed properties in (value, error) form.
    """
    # May be an Isochrone class or instance; instantiated lazily via `ic`.
    self._ic = ic
    self.properties = kwargs
    self.max_distance = max_distance
    self.maxAV = maxAV
    self.name = name
    # Posterior sample caches, populated after a fit.
    self._samples = None
    self._mnest_samples = None
    self.use_emcee = use_emcee
    if not mnest_available:
        # Fall back to emcee sampling when pymultinest failed to import.
        logging.warning('MultiNest not available; use_emcee being set to True')
        self.use_emcee = True
    self.min_logg = min_logg
    self.n_params = 5 #mass, feh, age, distance, AV
    # Property validation is deferred to the first likelihood evaluation.
    self._props_cleaned = False
    self._mnest_basename = None
@property
def ic(self):
if type(self._ic)==type:
self._ic = self._ic()
return self._ic
@classmethod
def from_ini(cls, ic, folder='.', ini_file='star.ini'):
    """
    Initialize a StarModel from a .ini file

    File should contain all arguments with which to initialize
    StarModel.  Scalar entries are parsed as floats; multi-element
    entries as (value, error) pairs.

    :param ic: Isochrone (class or instance) to model the star with.
    :param folder: directory containing the ini file.
    :param ini_file: ini filename, or an absolute path to it.
    """
    if not os.path.isabs(ini_file):
        ini_file = os.path.join(folder, ini_file)
    config = ConfigObj(ini_file)
    kwargs = {}
    for kw in config.keys():
        try:
            kwargs[kw] = float(config[kw])
        # Fix: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; only conversion failures mean
        # "this is a (value, error) pair".
        except (TypeError, ValueError):
            kwargs[kw] = (float(config[kw][0]), float(config[kw][1]))
    return cls(ic, **kwargs)
@property
def mags(self):
d = {}
for prop,vals in self.properties.items():
if prop in self.ic.bands:
try:
val,err = vals
except TypeError:
val = vals
d[prop] = val
return d
@property
def mag_errs(self):
d = {}
for prop,vals in self.properties.items():
if prop in self.ic.bands:
try:
val,err = vals
except TypeError:
continue
d[prop] = err
return d
@property
def Teff(self):
if 'Teff' in self.properties:
return self.properties['Teff']
@property
def feh(self):
if 'feh' in self.properties:
return self.properties['feh']
@property
def logg(self):
if 'logg' in self.properties:
return self.properties['logg']
def _clean_props(self):
    """
    Makes sure all properties are legit for isochrone.

    Not done in __init__ in order to save speed on loading.

    Removes (with a warning) any property the isochrone cannot model,
    and any property whose value is NaN or inf.
    """
    remove = []
    for p in self.properties.keys():
        # Keep isochrone attributes, photometric bands, parallax/feh/age,
        # and 'delta_*' pseudo-properties; everything else is dropped.
        if not hasattr(self.ic, p) and \
                p not in self.ic.bands and p not in ['parallax','feh','age'] and \
                not re.search('delta_',p):
            remove.append(p)
    for p in remove:
        del self.properties[p]
    if len(remove) > 0:
        logging.warning('Properties removed from Model because ' +
                        'not present in {}: {}'.format(type(self.ic),remove))
    remove = []
    for p in self.properties.keys():
        try:
            val = self.properties[p][0]
            if not np.isfinite(val):
                remove.append(p)
        except:
            # Non-subscriptable / non-numeric values are left untouched here.
            pass
    for p in remove:
        del self.properties[p]
    if len(remove) > 0:
        logging.warning('Properties removed from Model because ' +
                        'value is nan or inf: {}'.format(remove))
    # Mark validation done so lnlike() does not repeat it.
    self._props_cleaned = True
def add_props(self,**kwargs):
"""
Adds observable properties to ``self.properties``.
"""
for kw,val in kwargs.iteritems():
self.properties[kw] = val
def remove_props(self,*args):
"""
Removes desired properties from ``self.properties``.
"""
for arg in args:
if arg in self.properties:
del self.properties[arg]
@property
def fit_for_distance(self):
"""
``True`` if any of the properties are apparent magnitudes.
"""
for prop in self.properties.keys():
if prop in self.ic.bands:
return True
return False
def loglike(self, *args, **kwargs):
"""For backwards compatibility
"""
return lnpost(*args, **kwargs)
def lnlike(self, p):
    """Log-likelihood of model at given parameters

    :param p:
        mass, log10(age), feh, [distance, A_V (extinction)].
        Final two should only be provided if ``self.fit_for_distance``
        is ``True``; that is, apparent magnitudes are provided.

    :return:
        log-likelihood. Will be -np.inf if values out of range.
    """
    if not self._props_cleaned:
        self._clean_props()
    if not self.use_emcee:
        # MultiNest always samples the full 5-parameter cube.
        fit_for_distance = True
        mass, age, feh, dist, AV = (p[0], p[1], p[2], p[3], p[4])
    else:
        if len(p)==5:
            fit_for_distance = True
            mass,age,feh,dist,AV = p
        elif len(p)==3:
            fit_for_distance = False
            mass,age,feh = p
    # Hard bounds: outside the isochrone grid the model is undefined.
    if mass < self.ic.minmass or mass > self.ic.maxmass \
            or age < self.ic.minage or age > self.ic.maxage \
            or feh < self.ic.minfeh or feh > self.ic.maxfeh:
        return -np.inf
    if fit_for_distance:
        if dist < 0 or AV < 0 or dist > self.max_distance:
            return -np.inf
        if AV > self.maxAV:
            return -np.inf
    if self.min_logg is not None:
        logg = self.ic.logg(mass,age,feh)
        if logg < self.min_logg:
            return -np.inf
    logl = 0
    for prop in self.properties.keys():
        try:
            val,err = self.properties[prop]
        except TypeError:
            #property not appropriate for fitting (e.g. no error provided)
            continue
        if prop in self.ic.bands:
            if not fit_for_distance:
                raise ValueError('must fit for mass, age, feh, dist, A_V if apparent magnitudes provided.')
            # Apparent magnitude: model mag + distance modulus + extinction.
            mod = self.ic.mag[prop](mass,age,feh) + 5*np.log10(dist) - 5
            A = AV*EXTINCTION[prop]
            mod += A
        elif re.search('delta_',prop):
            continue
        elif prop=='feh':
            mod = feh
        elif prop=='parallax':
            # Parallax in milliarcseconds from distance in parsecs.
            mod = 1./dist * 1000
        else:
            mod = getattr(self.ic,prop)(mass,age,feh)
        # Gaussian measurement model, including the normalization term.
        logl += -(val-mod)**2/(2*err**2) + np.log(1/(err*np.sqrt(2*np.pi)))
    if np.isnan(logl):
        logl = -np.inf
    return logl
def lnprior(self, mass, age, feh,
            distance=None, AV=None,
            use_local_fehprior=True):
    """
    log-prior for model parameters

    Mass uses ``salpeter_prior``; age has density proportional to age on
    the log10(age) grid; feh uses ``local_fehdist`` (or a flat prior);
    distance is uniform in volume (p ~ d^2 out to max_distance); AV is
    uniform on [0, maxAV].  distance/AV terms are skipped when None.
    """
    mass_prior = salpeter_prior(mass)
    if mass_prior==0:
        mass_lnprior = -np.inf
    else:
        mass_lnprior = np.log(mass_prior)
    if np.isnan(mass_lnprior):
        logging.warning('mass prior is nan at {}'.format(mass))
    # Normalized so that integral of (age * const) over the grid is 1.
    age_lnprior = np.log(age * (2/(self.ic.maxage**2-self.ic.minage**2)))
    if np.isnan(age_lnprior):
        logging.warning('age prior is nan at {}'.format(age))
    if use_local_fehprior:
        fehdist = local_fehdist(feh)
    else:
        # Flat prior over the isochrone's metallicity range.
        fehdist = 1/(self.ic.maxfeh - self.ic.minfeh)
    feh_lnprior = np.log(fehdist)
    if np.isnan(feh_lnprior):
        logging.warning('feh prior is nan at {}'.format(feh))
    if distance is not None:
        if distance <= 0:
            distance_lnprior = -np.inf
        else:
            # Uniform space density: p(d) = 3 d^2 / max_distance^3.
            distance_lnprior = np.log(3/self.max_distance**3 * distance**2)
    else:
        distance_lnprior = 0
    if np.isnan(distance_lnprior):
        logging.warning('distance prior is nan at {}'.format(distance))
    if AV is not None:
        AV_lnprior = np.log(1/self.maxAV)
    else:
        AV_lnprior = 0
    if np.isnan(AV_lnprior):
        logging.warning('AV prior is nan at {}'.format(AV))
    lnprior = (mass_lnprior + age_lnprior + feh_lnprior +
               distance_lnprior + AV_lnprior)
    return lnprior
def lnpost(self, p, use_local_fehprior=True):
    """
    log-posterior of model at given parameters

    :param p: 5-parameter vector (mass, age, feh, distance, AV), or a
        3-parameter vector when using emcee without photometry.
    :param use_local_fehprior: passed through to :func:`lnprior`.
    """
    if not self.use_emcee:
        mass, age, feh, dist, AV = (p[0], p[1], p[2], p[3], p[4])
    else:
        if len(p)==5:
            fit_for_distance = True
            mass,age,feh,dist,AV = p
        elif len(p)==3:
            fit_for_distance = False
            mass,age,feh = p
            # No photometry: distance/AV prior terms are skipped in lnprior.
            dist = None
            AV = None
    return (self.lnlike(p) +
            self.lnprior(mass, age, feh, dist, AV,
                         use_local_fehprior=use_local_fehprior))
def maxlike(self,nseeds=50):
    """Returns the best-fit parameters, choosing the best of multiple starting guesses

    :param nseeds: (optional)
        Number of starting guesses, uniformly distributed throughout
        allowed ranges. Default=50.

    :return:
        list of best-fit parameters: ``[m,age,feh,[distance,A_V]]``.
        Note that distance and A_V values will be meaningless unless
        magnitudes are present in ``self.properties``.
    """
    # Random seeds: isochrone grid points, log-uniform distance, uniform AV.
    m0,age0,feh0 = self.ic.random_points(nseeds)
    d0 = 10**(rand.uniform(0,np.log10(self.max_distance),size=nseeds))
    AV0 = rand.uniform(0,self.maxAV,size=nseeds)
    costs = np.zeros(nseeds)
    if self.fit_for_distance:
        pfits = np.zeros((nseeds,5))
    else:
        pfits = np.zeros((nseeds,3))
    def fn(p): #fmin is a function *minimizer*
        return -1*self.lnpost(p)
    for i,m,age,feh,d,AV in zip(range(nseeds),
                                m0,age0,feh0,d0,AV0):
        if self.fit_for_distance:
            pfit = scipy.optimize.fmin(fn,[m,age,feh,d,AV],disp=False)
        else:
            pfit = scipy.optimize.fmin(fn,[m,age,feh],disp=False)
        pfits[i,:] = pfit
        costs[i] = self.lnpost(pfit)
    # Return the seed whose optimum has the highest posterior.
    return pfits[np.argmax(costs),:]
def mnest_prior(self, cube, ndim, nparams):
"""
Transforms unit cube into parameter cube.
Parameters if running multinest must be mass, age, feh, distance, AV.
"""
cube[0] = (self.ic.maxmass - self.ic.minmass)*cube[0] + self.ic.minmass
cube[1] = (self.ic.maxage - self.ic.minage)*cube[1] + self.ic.minage
cube[2] = (self.ic.maxfeh - self.ic.minfeh)*cube[2] + self.ic.minfeh
cube[3] = cube[3]*self.max_distance
cube[4] = cube[4]*self.maxAV
def mnest_loglike(self, cube, ndim, nparams):
    """loglikelihood function for multinest

    *cube* arrives already transformed by :func:`mnest_prior`.  Note this
    returns :func:`lnpost`, so priors are folded into the sampled density.
    """
    return self.lnpost(cube)
def fit(self, **kwargs):
"""
Wrapper for either :func:`fit_multinest` or :func:`fit_mcmc`.
Default will be to use MultiNest; set `use_emcee` keyword to `True`
if you want to use MCMC, or just call :func:`fit_mcmc` directly.
"""
if self.use_emcee:
if 'basename' in kwargs:
del kwargs['basename']
if 'verbose' in kwargs:
del kwargs['verbose']
if 'overwrite' in kwargs:
del kwargs['overwrite']
self.fit_mcmc(**kwargs)
else:
self.fit_multinest(**kwargs)
def fit_multinest(self, n_live_points=1000, basename='chains/single-',
                  verbose=True, refit=False, overwrite=False,
                  **kwargs):
    """
    Fits model using MultiNest, via pymultinest.

    :param n_live_points:
        Number of live points to use for MultiNest fit.

    :param basename:
        Where the MulitNest-generated files will live.
        By default this will be in a folder named `chains`
        in the current working directory.  Calling this
        will define a `_mnest_basename` attribute for
        this object.

    :param verbose:
        Whether you want MultiNest to talk to you.

    :param refit, overwrite:
        Set either of these to true if you want to
        delete the MultiNest files associated with the
        given basename and start over.

    :param **kwargs:
        Additional keyword arguments will be passed to
        :func:`pymultinest.run`.
    """
    folder = os.path.abspath(os.path.dirname(basename))
    if not os.path.exists(folder):
        os.makedirs(folder)

    # If previous fit exists, see if it's using the same
    # observed properties
    prop_nomatch = False
    propfile = '{}properties.json'.format(basename)
    if os.path.exists(propfile):
        with open(propfile) as f:
            props = json.load(f)
        if set(props.keys()) != set(self.properties.keys()):
            prop_nomatch = True
        else:
            for k, v in props.items():
                if np.size(v) == 2:
                    # Fix: the original mis-parenthesized this comparison
                    # ("not a == x and b == y" binds as "(not a == x) and
                    # (b == y)") and assigned the result to a misspelled
                    # variable ('props_nomatch'), so the consistency check
                    # never actually triggered.
                    if not (self.properties[k][0] == v[0] and
                            self.properties[k][1] == v[1]):
                        prop_nomatch = True
                else:
                    if not self.properties[k] == v:
                        prop_nomatch = True

    if prop_nomatch and not overwrite:
        raise ValueError('Properties not same as saved chains ' +
                         '(basename {}*). '.format(basename) +
                         'Use overwrite=True to fit.')

    if refit or overwrite:
        files = glob.glob('{}*'.format(basename))
        [os.remove(f) for f in files]

    self._mnest_basename = basename

    pymultinest.run(self.mnest_loglike, self.mnest_prior, self.n_params,
                    n_live_points=n_live_points, outputfiles_basename=basename,
                    verbose=verbose,
                    **kwargs)

    # Record the observed properties alongside the chains so future calls
    # can detect mismatches.
    with open(propfile, 'w') as f:
        json.dump(self.properties, f, indent=2)

    self._make_samples()
@property
def mnest_analyzer(self):
    """
    PyMultiNest Analyzer object associated with fit.

    See PyMultiNest documentation for more.

    Requires a previous :func:`fit_multinest` call to have set
    ``self._mnest_basename``.
    """
    return pymultinest.Analyzer(self.n_params, self._mnest_basename)
@property
def evidence(self):
    """
    (ln-evidence, ln-evidence error) tuple from the MultiNest fit.
    """
    stats = self.mnest_analyzer.get_stats()
    return (stats['global evidence'], stats['global evidence error'])
def fit_mcmc(self,nwalkers=300,nburn=200,niter=100,
             p0=None,initial_burn=None,
             ninitial=100, loglike_kwargs=None,
             **kwargs):
    """Fits stellar model using MCMC.

    :param nwalkers: (optional)
        Number of walkers to pass to :class:`emcee.EnsembleSampler`.
        Default is 300.

    :param nburn: (optional)
        Number of iterations for "burn-in."  Default is 200.

    :param niter: (optional)
        Number of for-keeps iterations for MCMC chain.
        Default is 100.

    :param p0: (optional)
        Initial parameters for emcee.  If not provided, then chains
        will behave according to whether inital_burn is set.

    :param initial_burn: (optional)
        If `True`, then initialize walkers first with a random initialization,
        then cull the walkers, keeping only those with > 15% acceptance
        rate, then reinitialize sampling.  If `False`, then just do
        normal burn-in.  Default is `None`, which will be set to `True` if
        fitting for distance (i.e., if there are apparent magnitudes as
        properties of the model), and `False` if not.

    :param ninitial: (optional)
        Number of iterations to test walkers for acceptance rate before
        re-initializing.

    :param loglike_kwargs:
        Currently unused.

    :param **kwargs:
        Additional keyword arguments passed to :class:`emcee.EnsembleSampler`
        constructor.

    :return:
        :class:`emcee.EnsembleSampler` object.
    """
    #clear any saved _samples
    if self._samples is not None:
        self._samples = None
    if self.fit_for_distance:
        npars = 5
        if initial_burn is None:
            initial_burn = True
    else:
        if initial_burn is None:
            initial_burn = False
        npars = 3
    if p0 is None:
        # Random starting points across the allowed parameter ranges.
        m0,age0,feh0 = self.ic.random_points(nwalkers)
        d0 = 10**(rand.uniform(0,np.log10(self.max_distance),size=nwalkers))
        AV0 = rand.uniform(0,self.maxAV,size=nwalkers)
        if self.fit_for_distance:
            p0 = np.array([m0,age0,feh0,d0,AV0]).T
        else:
            p0 = np.array([m0,age0,feh0]).T
        if initial_burn:
            sampler = emcee.EnsembleSampler(nwalkers,npars,self.lnpost,
                                            **kwargs)
            #ninitial = 300 #should this be parameter?
            pos, prob, state = sampler.run_mcmc(p0, ninitial)
            # Keep only walkers with a healthy acceptance fraction.
            wokinds = np.where((sampler.naccepted/ninitial > 0.15) &
                               (sampler.naccepted/ninitial < 0.4))[0]
            i=1
            while len(wokinds)==0:
                # Progressively relax the lower acceptance threshold.
                thresh = 0.15 - i*0.02
                if thresh < 0:
                    raise RuntimeError('Initial burn has no acceptance?')
                wokinds = np.where((sampler.naccepted/ninitial > thresh) &
                                   (sampler.naccepted/ninitial < 0.4))[0]
                i += 1
            # Re-seed all walkers near the mean positions of the good ones,
            # with a small (1%) random perturbation.
            inds = rand.randint(len(wokinds),size=nwalkers)
            p0 = sampler.chain[wokinds[inds],:,:].mean(axis=1) #reset p0
            p0 *= (1 + rand.normal(size=p0.shape)*0.01)
    else:
        # Scatter the provided guess; distance gets a wider (50%) spread.
        p0 = np.array(p0)
        p0 = rand.normal(size=(nwalkers,npars))*0.01 + p0.T[None,:]
        if self.fit_for_distance:
            p0[:,3] *= (1 + rand.normal(size=nwalkers)*0.5)
    sampler = emcee.EnsembleSampler(nwalkers,npars,self.lnpost)
    pos, prob, state = sampler.run_mcmc(p0, nburn)
    sampler.reset()
    sampler.run_mcmc(pos, niter, rstate0=state)
    self._sampler = sampler
    return sampler
def mag_plot(self, height=500, pix_width=20, spacing=20,
             edge=0.1, figsize=(8,6)):
    """Plot a density "waterfall" image of the magnitude posteriors per band.

    Each photometric band gets a vertical stripe whose greyscale intensity
    is the Gaussian-KDE density of the posterior samples of that band's
    apparent magnitude; the observed value and its error are overplotted
    as a red errorbar point (with a white underlay for visibility).

    :param height:
        Number of vertical pixels (resolution of the magnitude grid).
    :param pix_width:
        Width in pixels of each band's stripe.
    :param spacing:
        Horizontal pixel gap between stripes.
    :param edge:
        Magnitude padding added above/below the plotted range.
    :param figsize:
        Size of the created matplotlib figure.
    :return:
        The matplotlib Figure object.
    """
    # Wrap in list() so this also works with Python 3 dict views;
    # np.array(dict.keys()) there yields a useless 0-d object array.
    bands = np.array(list(self.mag_errs.keys()))
    weffs = np.array([WEFF[b] for b in bands])

    # Order the stripes by effective wavelength (blue -> red).
    inds = np.argsort(weffs)
    bands = bands[inds]
    weffs = weffs[inds]

    # Magnitude range: central 98% of the samples plus all observed
    # values, padded by `edge` on both sides.
    q = 0.01
    minmag = min(np.min([self.samples['{}_mag'.format(b)].quantile(q)
                         for b in bands]) - edge,
                 np.min([self.properties[b][0] - edge for b in bands]))
    maxmag = max(np.max([self.samples['{}_mag'.format(b)].quantile(1-q)
                         for b in bands]) + edge,
                 np.max([self.properties[b][0] + edge for b in bands]))

    n_bands = len(bands)
    width = n_bands * (pix_width + spacing) + spacing
    mag_grid = np.linspace(minmag, maxmag, height)[::-1]
    image = np.zeros((height, width))

    plt.figure(figsize=figsize)

    mids = []
    for i,b in enumerate(bands):
        # Column extent of this band's stripe within the image.
        col1 = spacing*(i+1) + i*(pix_width)
        col2 = spacing*(i+1) + (i+1)*(pix_width)
        mids.append((col1 + col2)//2)
        vslice = image[:, col1:col2]
        kde = gaussian_kde(self.samples['{}_mag'.format(b)])
        pdf = kde(mag_grid)
        vslice += pdf[:, np.newaxis]

    extent = [0, image.shape[1], maxmag, minmag]
    plt.imshow(image, aspect='auto', cmap='binary',
               extent=extent, origin='lower')

    ax = plt.gca()
    ax.set_xticks(mids)
    ax.set_xticklabels(bands, fontsize=18)
    ax.set_ylabel('mag', fontsize=18)
    yticks = ax.get_yticks()
    ax.set_yticks(yticks[::-1])
    plt.tick_params(axis='y', labelsize=16)

    # Observed magnitudes: white underlay then red point on top.
    for i,(b,m) in enumerate(zip(bands,mids)):
        val, err = self.properties[b]
        plt.errorbar(m, val, err, marker='o', color='w',
                     ms=4, lw=5, mec='w', mew=5)
        plt.errorbar(m, val, err, marker='o', color='r',
                     ms=4, lw=3, mec='r', mew=3)

    plt.title(self.name, fontsize=20)
    return plt.gcf()
def triangle_plots(self, basename=None, format='png',
                   **kwargs):
    """Returns two triangle plots, one with physical params, one observational

    :param basename:
        If basename is provided, then plots will be saved as
        "[basename]_physical.[format]" and "[basename]_observed.[format]"

    :param format:
        Format in which to save figures (e.g., 'png' or 'pdf')

    :param **kwargs:
        Additional keyword arguments passed to :func:`StarModel.triangle`
        and :func:`StarModel.prop_triangle`

    :return:
        * Physical parameters triangle plot (mass, radius, Teff, feh, age, distance)
        * Observed properties triangle plot.
    """
    # Distance and extinction only exist as chains when apparent
    # magnitudes were fit.
    if self.fit_for_distance:
        physical = ['mass','radius','Teff','logg','feh','age',
                    'distance','AV']
    else:
        physical = ['mass','radius','Teff','feh','age']

    fig1 = self.triangle(plot_datapoints=False,
                         params=physical,
                         **kwargs)
    if basename is not None:
        plt.savefig('{}_physical.{}'.format(basename,format))
        plt.close()

    fig2 = self.prop_triangle(**kwargs)
    if basename is not None:
        plt.savefig('{}_observed.{}'.format(basename,format))
        plt.close()

    return fig1, fig2
def triangle(self, params=None, query=None, extent=0.999,
             **kwargs):
    """
    Makes a nifty corner plot.

    Uses :func:`triangle.corner`.

    :param params: (optional)
        Names of columns (from :attr:`StarModel.samples`)
        to plot.  If ``None``, then it will plot samples
        of the parameters used in the MCMC fit-- that is,
        mass, age, [Fe/H], and optionally distance and A_V.
        ``delta_<band>`` entries are computed on the fly for
        :class:`BinaryStarModel` instances and silently dropped
        otherwise.

    :param query: (optional)
        Optional query on samples.

    :param extent: (optional)
        Central quantile range (e.g. 0.999) converted to per-parameter
        plot ranges and passed to :func:`triangle.corner`.

    :param **kwargs:
        Additional keyword arguments passed to :func:`triangle.corner`.

    :return:
        Figure oject containing corner plot.
    """
    if triangle is None:
        raise ImportError('please run "pip install triangle_plot".')

    if params is None:
        if self.fit_for_distance:
            params = ['mass', 'age', 'feh', 'distance', 'AV']
        else:
            params = ['mass', 'age', 'feh']

    df = self.samples

    if query is not None:
        df = df.query(query)

    #convert extent to ranges, but making sure
    # that truths are in range.
    extents = []
    remove = []
    for i,par in enumerate(params):
        m = re.search(r'delta_(\w+)$', par)
        if m:
            # type() (not isinstance) is deliberate: TripleStarModel
            # samples don't carry pairwise A/B magnitude columns.
            if type(self) == BinaryStarModel:
                b = m.group(1)
                values = (df['{}_mag_B'.format(b)] -
                          df['{}_mag_A'.format(b)])
                df[par] = values
            else:
                remove.append(i)
                continue
        else:
            values = df[par]
        qs = np.array([0.5 - 0.5*extent, 0.5 + 0.5*extent])
        minval, maxval = values.quantile(qs)
        if 'truths' in kwargs:
            # Widen the range slightly if a truth value falls outside it,
            # so the truth marker is always visible.
            datarange = maxval - minval
            if kwargs['truths'][i] < minval:
                minval = kwargs['truths'][i] - 0.05*datarange
            if kwargs['truths'][i] > maxval:
                maxval = kwargs['truths'][i] + 0.05*datarange
        extents.append((minval,maxval))

    # Bug fix: pop from the end so earlier indices stay valid.  Popping
    # in ascending order shifts the list and removes the wrong entries
    # whenever more than one parameter is dropped.
    for i in reversed(remove):
        params.pop(i)

    fig = triangle.corner(df[params], labels=params,
                          extents=extents, **kwargs)
    fig.suptitle(self.name, fontsize=22)
    return fig
def prop_triangle(self, **kwargs):
    """
    Makes corner plot of only observable properties.

    The idea here is to compare the predictions of the samples
    with the actual observed data---this can be a quick way to check
    if there are outlier properties that aren't predicted well
    by the model.

    :param **kwargs:
        Keyword arguments passed to :func:`StarModel.triangle`.

    :return:
        Figure object containing corner plot.
    """
    truths = []
    params = []
    for p in self.properties:
        try:
            val, err = self.properties[p]
        except (TypeError, ValueError):
            # Property is not a (value, error) pair, so it was not
            # used in the fit; skip it.  (Narrowed from a bare except,
            # which also hid genuine errors like KeyboardInterrupt.)
            continue
        if p in self.ic.bands:
            params.append('{}_mag'.format(p))
            truths.append(val)
        elif p=='parallax':
            # Parallax (mas) is compared against the distance chain (pc).
            params.append('distance')
            truths.append(1/(val/1000.))
        else:
            params.append(p)
            truths.append(val)
    return self.triangle(params, truths=truths, **kwargs)
@property
def sampler(self):
    """
    Sampler object from MCMC run.

    Raises AttributeError until a fit has stored ``self._sampler``.
    """
    try:
        return self._sampler
    except AttributeError:
        raise AttributeError('MCMC must be run to access sampler')
def _make_samples(self):
    """Build the posterior-samples DataFrame and cache it on ``self._samples``.

    Raw parameter chains come either from the MultiNest output file
    (when ``use_emcee`` is False) or from the stored emcee sampler; the
    isochrone is then evaluated at every sample point to attach
    derived/observable columns.
    """
    if not self.use_emcee:
        # MultiNest equal-weighted posterior file: columns are the fit
        # parameters in order, with ln(probability) in the last column.
        chain = np.loadtxt('{}post_equal_weights.dat'.format(self._mnest_basename))
        #for purposes of unit test, sometimes there will be 1-length chain...
        if chain.ndim==1:
            chain = np.array([chain])
        mass = chain[:,0]
        age = chain[:,1]
        feh = chain[:,2]
        distance = chain[:,3]
        AV = chain[:,4]
        lnprob = chain[:,-1]
    else:
        #select out only walkers with > 0.15 acceptance fraction
        ok_walkers = self.sampler.acceptance_fraction > 0.15

        mass = self.sampler.chain[ok_walkers, :, 0].ravel()
        age = self.sampler.chain[ok_walkers, :, 1].ravel()
        feh = self.sampler.chain[ok_walkers, :, 2].ravel()

        if self.fit_for_distance:
            distance = self.sampler.chain[ok_walkers, :, 3].ravel()
            AV = self.sampler.chain[ok_walkers, :, 4].ravel()
        else:
            # No apparent magnitudes were fit: absolute magnitudes only.
            distance = None
            AV = 0

        lnprob = self.sampler.lnprobability[ok_walkers, :].ravel()

    # Evaluate the isochrone at each sample; this attaches physical and
    # observable columns (radius, Teff, magnitudes, ...).
    df = self.ic(mass, age, feh,
                 distance=distance, AV=AV)
    df['age'] = age
    df['feh'] = feh

    if self.fit_for_distance:
        df['distance'] = distance
        df['AV'] = AV

    df['lnprob'] = lnprob

    self._samples = df.copy()
@property
def samples(self):
    """Dataframe with samples drawn from isochrone according to posterior

    Columns include both the sampling parameters from the MCMC
    fit (mass, age, Fe/H, [distance, A_V]), and also evaluation
    of the :class:`Isochrone` at each of these sample points---this
    is how chains of physical/observable parameters get produced.
    """
    if self._samples is None:
        # Nothing cached yet: samples can only be built from a sampler.
        if not hasattr(self, 'sampler'):
            raise AttributeError('Must run MCMC (or load from file) '+
                                 'before accessing samples')
        self._make_samples()
    return self._samples
def random_samples(self, n):
    """
    Returns a random sampling of given size from the existing samples.

    :param n:
        Number of samples

    :return:
        :class:`pandas.DataFrame` of length ``n`` with random samples.
    """
    all_samples = self.samples
    # Sample row positions with replacement.
    pick = rand.randint(len(all_samples), size=int(n))
    subset = all_samples.iloc[pick]
    subset.reset_index(inplace=True)
    return subset
def prop_samples(self,prop,return_values=True,conf=0.683):
    """Returns samples of given property, based on MCMC sampling

    :param prop:
        Name of desired property.  Must be column of ``self.samples``.

    :param return_values: (optional)
        If ``True`` (default), then also return (median, lo_err, hi_err)
        corresponding to desired credible interval.

    :param conf: (optional)
        Desired quantile for credible interval.  Default = 0.683.

    :return:
        :class:`np.ndarray` of desired samples

    :return:
        Optionally also return summary statistics (median, lo_err, hi_err),
        if ``return_values == True`` (this is default behavior)
    """
    samples = self.samples[prop].values

    if return_values:
        # `sorted_vals` rather than `sorted`, so the builtin isn't shadowed.
        sorted_vals = np.sort(samples)
        med = np.median(samples)
        n = len(samples)

        lo_ind = int(n*(0.5 - conf/2))
        # Clamp so conf -> 1 cannot index one past the end of the array.
        hi_ind = min(int(n*(0.5 + conf/2)), n - 1)

        lo = med - sorted_vals[lo_ind]
        hi = sorted_vals[hi_ind] - med

        return samples, (med,lo,hi)
    else:
        return samples
def plot_samples(self,prop,fig=None,label=True,
                 histtype='step',bins=50,lw=3,
                 **kwargs):
    """Plots histogram of samples of desired property.

    :param prop:
        Desired property (must be legit column of samples)

    :param fig:
        Argument for :func:`plotutils.setfig` (``None`` or int).

    :param histtype, bins, lw:
        Passed to :func:`plt.hist`.

    :param **kwargs:
        Additional keyword arguments passed to `plt.hist`

    :return:
        Figure object.
    """
    setfig(fig)
    values, summary = self.prop_samples(prop)
    fig = plt.hist(values, bins=bins, normed=True,
                   histtype=histtype, lw=lw, **kwargs)
    plt.xlabel(prop)
    plt.ylabel('Normalized count')

    if label:
        med, lo, hi = summary
        # Median with asymmetric errors, annotated in the upper right.
        plt.annotate('$%.2f^{+%.2f}_{-%.2f}$' % (med,hi,lo),
                     xy=(0.7,0.8),xycoords='axes fraction',fontsize=20)

    return fig
def save_hdf(self, filename, path='', overwrite=False, append=False):
    """Saves object data to HDF file (only works if MCMC is run)

    Samples are saved to /samples location under given path,
    and object properties are also attached, so suitable for
    re-loading via :func:`StarModel.load_hdf`.

    :param filename:
        Name of file to save to.  Should be .h5 file.

    :param path: (optional)
        Path within HDF file structure to save to.

    :param overwrite: (optional)
        If ``True``, delete any existing file by the same name
        before writing.

    :param append: (optional)
        If ``True``, then if a file exists, then just the path
        within the file will be updated.

    :raises IOError:
        If the target path already exists in the file and neither
        ``overwrite`` nor ``append`` is set.
    """
    if os.path.exists(filename):
        # Check for a pre-existing group at this path before writing.
        store = pd.HDFStore(filename)
        if path in store:
            store.close()
            if overwrite:
                os.remove(filename)
            elif not append:
                raise IOError('{} in {} exists. Set either overwrite or append option.'.format(path,filename))
        else:
            store.close()

    self.samples.to_hdf(filename, '{}/samples'.format(path))

    # Attach object metadata to the samples storer so load_hdf can
    # reconstruct the StarModel without re-running the fit.
    store = pd.HDFStore(filename)
    attrs = store.get_storer('{}/samples'.format(path)).attrs
    attrs.properties = self.properties
    attrs.ic_type = type(self.ic)
    attrs.maxAV = self.maxAV
    attrs.max_distance = self.max_distance
    attrs.min_logg = self.min_logg
    attrs.use_emcee = self.use_emcee
    attrs._mnest_basename = self._mnest_basename
    attrs.name = self.name
    store.close()
@classmethod
def load_hdf(cls, filename, path='', name=None):
    """
    A class method to load a saved StarModel from an HDF5 file.

    File must have been created by a call to :func:`StarModel.save_hdf`.

    :param filename:
        H5 file to load.

    :param path: (optional)
        Path within HDF file.

    :param name: (optional)
        Name for the loaded model; defaults to the name stored in
        the file, or ``''`` if none was stored.

    :return:
        :class:`StarModel` object.
    """
    store = pd.HDFStore(filename)
    try:
        samples = store['{}/samples'.format(path)]
        attrs = store.get_storer('{}/samples'.format(path)).attrs
    except Exception:
        # Close the store before propagating so the file isn't left open.
        store.close()
        raise
    properties = attrs.properties
    maxAV = attrs.maxAV
    max_distance = attrs.max_distance
    # NOTE(review): min_logg is read here but never passed to the
    # constructor below -- confirm whether it should be.
    min_logg = attrs.min_logg
    ic_type = attrs.ic_type
    use_emcee = attrs.use_emcee
    basename = attrs._mnest_basename

    if name is None:
        try:
            name = attrs.name
        except AttributeError:
            # Older files may not have a name attribute saved.
            name = ''
    store.close()

    #ic = ic_type() don't need to initialize anymore
    mod = cls(ic_type, maxAV=maxAV, max_distance=max_distance,
              use_emcee=use_emcee, name=name,
              **properties)
    mod._samples = samples
    mod._mnest_basename = basename
    return mod
class BinaryStarModel(StarModel):
    """
    Object used to fit two stars at the same distance to given observed properties

    Initialize the same way as :class:`StarModel`.

    Difference between this object and a regular :class:`StarModel` is that
    the fit parameters include two masses: ``mass_A`` and ``mass_B`` instead
    of just one.

    Notably, this object can also take additional
    ``delta_mag`` properties, representing contrast measurements of a companion
    star. These should be called, e.g., ``delta_r``. This will be an additional
    constraint in the model fitting. All the other provided apparent magnitudes
    should be the total combined light of the two stars.
    """
    def __init__(self, *args, **kwargs):
        super(BinaryStarModel, self).__init__(*args, **kwargs)
        # Fit parameters: mass_A, mass_B, age, feh, distance, AV.
        self.n_params = 6

    def lnlike(self, p):
        """Log-likelihood of model at given parameters

        :param p:
            mass_A, mass_B, log10(age), feh, [distance, A_V (extinction)].
            Final two should only be provided if ``self.fit_for_distance``
            is ``True``; that is, apparent magnitudes are provided.

        :return:
            log-likelihood. Will be -np.inf if values out of range.
        """
        if not self._props_cleaned:
            self._clean_props()

        if not self.use_emcee:
            # MultiNest always fits all six parameters.
            fit_for_distance = True
            mass_A, mass_B, age, feh, dist, AV = (p[0], p[1], p[2],
                                                  p[3], p[4], p[5])
        else:
            # NOTE(review): if len(p) is neither 6 nor 4, nothing is
            # unpacked and a NameError follows -- confirm callers always
            # pass a valid-length parameter vector.
            if len(p)==6:
                fit_for_distance = True
                mass_A, mass_B, age, feh, dist, AV = p
            elif len(p)==4:
                fit_for_distance = False
                mass_A, mass_B, age, feh = p

        #keep values in range; enforce mass_A > mass_B
        if mass_A < self.ic.minmass or mass_A > self.ic.maxmass \
           or mass_B < self.ic.minmass or mass_B > self.ic.maxmass \
           or mass_B > mass_A \
           or age < self.ic.minage or age > self.ic.maxage \
           or feh < self.ic.minfeh or feh > self.ic.maxfeh:
            return -np.inf

        if fit_for_distance:
            if dist < 0 or AV < 0 or dist > self.max_distance:
                return -np.inf
            if AV > self.maxAV:
                return -np.inf

        if self.min_logg is not None:
            logg = self.ic.logg(mass_A,age,feh)
            if logg < self.min_logg:
                return -np.inf

        logl = 0
        for prop in self.properties.keys():
            try:
                val,err = self.properties[prop]
            except TypeError:
                #property not appropriate for fitting (e.g. no error provided)
                continue
            m = re.search(r'delta_(\w+)', prop)
            if prop in self.ic.bands:
                if not fit_for_distance:
                    raise ValueError('must fit for mass, age, feh, dist, A_V '+
                                     'if apparent magnitudes provided.')
                # Total magnitude is the combined light of both components.
                mod_A = self.ic.mag[prop](mass_A,age,feh) + 5*np.log10(dist) - 5
                mod_B = self.ic.mag[prop](mass_B,age,feh) + 5*np.log10(dist) - 5
                A = AV*EXTINCTION[prop]
                mod_A += A
                mod_B += A
                mod = addmags(mod_A, mod_B)
            elif m:
                # delta_<band> property: magnitude difference B - A.
                band = m.group(1)
                mod_A = self.ic.mag[band](mass_A,age,feh) + 5*np.log10(dist) - 5
                mod_B = self.ic.mag[band](mass_B,age,feh) + 5*np.log10(dist) - 5
                A = AV*EXTINCTION[band]
                mod_A += A
                mod_B += A
                mod = mod_B - mod_A
            elif prop=='feh':
                mod = feh
            elif prop=='parallax':
                # Parallax in mas for distance in pc.
                mod = 1./dist * 1000
            else:
                mod = getattr(self.ic,prop)(mass_A,age,feh)
            logl += -(val-mod)**2/(2*err**2) + np.log(1/(err*np.sqrt(2*np.pi)))

        if np.isnan(logl):
            logl = -np.inf

        return logl

    def lnprior(self, mass_A, mass_B, age, feh,
                distance=None, AV=None, use_local_fehprior=True):
        """Log-prior: single-star prior on the primary, plus a mass-ratio prior."""
        lnpr = super(BinaryStarModel,self).lnprior(mass_A, age, feh, distance, AV,
                                                   use_local_fehprior=use_local_fehprior)
        q = mass_B / mass_A
        lnpr += np.log(q_prior(q, mass_A))
        return lnpr

    def lnpost(self, p, use_local_fehprior=True):
        """Log-posterior (log-likelihood plus log-prior) at parameters ``p``."""
        if not self.use_emcee:
            mass_A,mass_B,age,feh,dist,AV = (p[0], p[1], p[2],
                                             p[3], p[4], p[5])
        else:
            if len(p)==6:
                fit_for_distance = True
                mass_A,mass_B,age,feh,dist,AV = p
            elif len(p)==4:
                fit_for_distance = False
                mass_A,mass_B,age,feh = p
                dist = None
                AV = None

        return (self.lnlike(p) +
                self.lnprior(mass_A, mass_B, age, feh, dist, AV,
                             use_local_fehprior=use_local_fehprior))

    def maxlike(self,nseeds=50):
        """Returns the best-fit parameters, choosing the best of multiple starting guesses

        :param nseeds: (optional)
            Number of starting guesses, uniformly distributed throughout
            allowed ranges. Default=50.

        :return:
            list of best-fit parameters: ``[mA,mB,age,feh,[distance,A_V]]``.
            Note that distance and A_V values will be meaningless unless
            magnitudes are present in ``self.properties``.
        """
        mA_0,age0,feh0 = self.ic.random_points(nseeds)
        mB_0,foo1,foo2 = self.ic.random_points(nseeds)
        # Enforce mass_A >= mass_B in the starting guesses.
        mA_fixed = np.maximum(mA_0,mB_0)
        mB_fixed = np.minimum(mA_0,mB_0)
        mA_0, mB_0 = (mA_fixed, mB_fixed)

        d0 = 10**(rand.uniform(0,np.log10(self.max_distance),size=nseeds))
        AV0 = rand.uniform(0,self.maxAV,size=nseeds)

        costs = np.zeros(nseeds)

        if self.fit_for_distance:
            pfits = np.zeros((nseeds,6))
        else:
            pfits = np.zeros((nseeds,4))

        def fn(p): #fmin is a function *minimizer*
            return -1*self.lnpost(p)

        for i,mA,mB,age,feh,d,AV in zip(range(nseeds),
                                        mA_0,mB_0,age0,feh0,d0,AV0):
            if self.fit_for_distance:
                pfit = scipy.optimize.fmin(fn,[mA,mB,age,feh,d,AV],disp=False)
            else:
                pfit = scipy.optimize.fmin(fn,[mA,mB,age,feh],disp=False)
            pfits[i,:] = pfit
            costs[i] = self.lnpost(pfit)

        return pfits[np.argmax(costs),:]

    def mnest_prior(self, cube, ndim, nparams):
        """Map the MultiNest unit hypercube onto physical parameter ranges.

        ``cube[1]`` (mass_B) is conditioned on ``cube[0]`` (mass_A) so that
        mass_B <= mass_A always holds.
        """
        cube[0] = (self.ic.maxmass - self.ic.minmass)*cube[0] + self.ic.minmass
        cube[1] = (cube[0] - self.ic.minmass)*cube[1] + self.ic.minmass
        cube[2] = (self.ic.maxage - self.ic.minage)*cube[2] + self.ic.minage
        cube[3] = (self.ic.maxfeh - self.ic.minfeh)*cube[3] + self.ic.minfeh
        cube[4] = cube[4]*self.max_distance
        cube[5] = cube[5]*self.maxAV

    def fit_multinest(self, basename='chains/binary-', **kwargs):
        """Same as :func:`StarModel.fit_multinest`, with a binary-specific default basename."""
        super(BinaryStarModel, self).fit_multinest(basename=basename, **kwargs)

    def fit_mcmc(self,nwalkers=200,nburn=100,niter=200,
                 p0=None,initial_burn=None,
                 ninitial=100, loglike_kwargs=None,
                 **kwargs):
        """Fits stellar model using MCMC.

        See :func:`StarModel.fit_mcmc`
        """
        #clear any saved _samples
        if self._samples is not None:
            self._samples = None

        if self.fit_for_distance:
            npars = 6
            if initial_burn is None:
                initial_burn = True
        else:
            if initial_burn is None:
                initial_burn = False
            npars = 4

        if p0 is None:
            mA_0,age0,feh0 = self.ic.random_points(nwalkers)
            mB_0,foo1,foo2 = self.ic.random_points(nwalkers)
            # Enforce mass_A >= mass_B in the initial walker positions.
            mA_fixed = np.maximum(mA_0,mB_0)
            mB_fixed = np.minimum(mA_0,mB_0)
            mA_0, mB_0 = (mA_fixed, mB_fixed)
            d0 = 10**(rand.uniform(0,np.log10(self.max_distance),size=nwalkers))
            AV0 = rand.uniform(0,self.maxAV,size=nwalkers)
            if self.fit_for_distance:
                p0 = np.array([mA_0,mB_0,age0,feh0,d0,AV0]).T
            else:
                p0 = np.array([mA_0,mB_0,age0,feh0]).T
            if initial_burn:
                sampler = emcee.EnsembleSampler(nwalkers,npars,self.lnpost,
                                                **kwargs)
                #ninitial = 300 #should this be parameter?
                pos, prob, state = sampler.run_mcmc(p0, ninitial)

                # Keep only walkers with a healthy acceptance rate,
                # relaxing the lower threshold until at least one passes.
                wokinds = np.where((sampler.naccepted/ninitial > 0.15) &
                                   (sampler.naccepted/ninitial < 0.4))[0]
                i=1
                while len(wokinds)==0:
                    thresh = 0.15 - i*0.02
                    if thresh < 0:
                        raise RuntimeError('Initial burn has no acceptance?')
                    wokinds = np.where((sampler.naccepted/ninitial > thresh) &
                                       (sampler.naccepted/ninitial < 0.4))[0]
                    i += 1
                inds = rand.randint(len(wokinds),size=nwalkers)
                p0 = sampler.chain[wokinds[inds],:,:].mean(axis=1) #reset p0
                p0 *= (1 + rand.normal(size=p0.shape)*0.01)
        else:
            p0 = np.array(p0)
            p0 = rand.normal(size=(nwalkers,npars))*0.01 + p0.T[None,:]
            if self.fit_for_distance:
                # Extra scatter in distance (column 4).
                p0[:,4] *= (1 + rand.normal(size=nwalkers)*0.5)

        sampler = emcee.EnsembleSampler(nwalkers,npars,self.lnpost)
        pos, prob, state = sampler.run_mcmc(p0, nburn)

        sampler.reset()
        sampler.run_mcmc(pos, niter, rstate0=state)

        self._sampler = sampler
        return sampler

    def triangle_plots(self, basename=None, format='png',
                       **kwargs):
        """Returns two triangle plots, one with physical params, one observational

        :param basename:
            If basename is provided, then plots will be saved as
            "[basename]_physical.[format]" and "[basename]_observed.[format]"

        :param format:
            Format in which to save figures (e.g., 'png' or 'pdf')

        :param **kwargs:
            Additional keyword arguments passed to :func:`StarModel.triangle`
            and :func:`StarModel.prop_triangle`

        :return:
            * Physical parameters triangle plot (mass_A, mass_B, radius, Teff, feh, age, distance)
            * Observed properties triangle plot.
        """
        fig1 = self.triangle(plot_datapoints=False,
                             params=['mass_A', 'mass_B','radius','Teff','logg','feh','age',
                                     'distance', 'AV'],
                             **kwargs)
        if basename is not None:
            plt.savefig('{}_physical.{}'.format(basename,format))
            plt.close()
        fig2 = self.prop_triangle(**kwargs)
        if basename is not None:
            plt.savefig('{}_observed.{}'.format(basename,format))
            plt.close()
        return fig1, fig2

    def triangle(self, params=None, **kwargs):
        """
        Makes a nifty corner plot.

        Uses :func:`triangle.corner`.

        :param params: (optional)
            Names of columns (from :attr:`StarModel.samples`)
            to plot. If ``None``, the parameters used in the MCMC
            fit are plotted: mass_A, mass_B, age, [Fe/H], distance, A_V.

        :param **kwargs:
            Additional keyword arguments passed to
            :func:`StarModel.triangle`.

        :return:
            Figure object containing corner plot.
        """
        if params is None:
            params = ['mass_A', 'mass_B', 'age', 'feh', 'distance', 'AV']
        # Bug fix: the parent's figure was previously dropped (no return),
        # so triangle_plots always received None for fig1.
        return super(BinaryStarModel, self).triangle(params=params, **kwargs)

    def _make_samples(self):
        """Build the samples DataFrame; combined magnitudes plus per-component columns."""
        if not self.use_emcee:
            chain = np.loadtxt('{}post_equal_weights.dat'.format(self._mnest_basename))
            #for purposes of unit test, sometimes there will be 1-length chain...
            if chain.ndim==1:
                chain = np.array([chain])
            mass_A = chain[:,0]
            mass_B = chain[:,1]
            age = chain[:,2]
            feh = chain[:,3]
            distance = chain[:,4]
            AV = chain[:,5]
            lnprob = chain[:,-1]
        else:
            #select out legit walkers
            ok_walkers = self.sampler.acceptance_fraction > 0.15

            mass_A = self.sampler.chain[ok_walkers,:,0].ravel()
            mass_B = self.sampler.chain[ok_walkers,:,1].ravel()
            age = self.sampler.chain[ok_walkers,:,2].ravel()
            feh = self.sampler.chain[ok_walkers,:,3].ravel()

            if self.fit_for_distance:
                distance = self.sampler.chain[ok_walkers,:,4].ravel()
                AV = self.sampler.chain[ok_walkers,:,5].ravel()
            else:
                distance = None
                AV = 0

            lnprob = self.sampler.lnprobability[ok_walkers,:].ravel()

        df = self.ic(mass_A, age, feh,
                     distance=distance, AV=AV)
        df_B = self.ic(mass_B, age, feh,
                       distance=distance, AV=AV)

        # Magnitude columns: keep per-component values and replace the
        # main column with the combined (summed-flux) magnitude.
        for col in df_B.columns:
            m = re.search(r'_mag$', col)
            if m:
                df['{}_A'.format(col)] = df[col]
                df['{}_B'.format(col)] = df_B[col]
                df[col] = addmags(df[col], df_B[col])

        df['mass_A'] = df['mass']
        df.drop('mass', axis=1, inplace=True)
        df['mass_B'] = df_B['mass']
        df['radius_B'] = df_B['radius']

        df['age'] = age
        df['feh'] = feh

        if self.fit_for_distance:
            df['distance'] = distance
            df['AV'] = AV

        df['lnprob'] = lnprob

        self._samples = df.copy()
class TripleStarModel(StarModel):
    """Just like BinaryStarModel but for three.

    Parameters now include mass_A, mass_B, and mass_C
    """
    def __init__(self, *args, **kwargs):
        super(TripleStarModel, self).__init__(*args, **kwargs)
        # Fit parameters: mass_A, mass_B, mass_C, age, feh, distance, AV.
        self.n_params = 7

    def lnlike(self, p):
        """Log-likelihood of model at given parameters

        :param p:
            mass_A, mass_B, mass_C, log10(age), feh, [distance, A_V (extinction)].
            Final two should only be provided if ``self.fit_for_distance``
            is ``True``; that is, apparent magnitudes are provided.

        :return:
            log-likelihood. Will be -np.inf if values out of range.
        """
        if not self._props_cleaned:
            self._clean_props()

        if not self.use_emcee:
            # MultiNest always fits all seven parameters.
            fit_for_distance = True
            mass_A, mass_B, mass_C, age, feh, dist, AV = (p[0], p[1], p[2],
                                                          p[3], p[4], p[5],
                                                          p[6])
        else:
            # NOTE(review): if len(p) is neither 7 nor 5, nothing is
            # unpacked and a NameError follows -- confirm callers always
            # pass a valid-length parameter vector.
            if len(p)==7:
                fit_for_distance = True
                mass_A, mass_B, mass_C, age, feh, dist, AV = p
            elif len(p)==5:
                fit_for_distance = False
                mass_A, mass_B, mass_C, age, feh = p

        #keep values in range; enforce mass_A > mass_B > mass_C
        if mass_A < self.ic.minmass or mass_A > self.ic.maxmass \
           or mass_B < self.ic.minmass or mass_B > self.ic.maxmass \
           or mass_C < self.ic.minmass or mass_C > self.ic.maxmass \
           or mass_B > mass_A \
           or mass_C > mass_B or mass_C > mass_A \
           or age < self.ic.minage or age > self.ic.maxage \
           or feh < self.ic.minfeh or feh > self.ic.maxfeh:
            return -np.inf

        if fit_for_distance:
            if dist < 0 or AV < 0 or dist > self.max_distance:
                return -np.inf
            if AV > self.maxAV:
                return -np.inf

        if self.min_logg is not None:
            logg = self.ic.logg(mass_A,age,feh)
            if logg < self.min_logg:
                return -np.inf

        logl = 0
        for prop in self.properties.keys():
            try:
                val,err = self.properties[prop]
            except TypeError:
                #property not appropriate for fitting (e.g. no error provided)
                continue
            if prop in self.ic.bands:
                if not fit_for_distance:
                    raise ValueError('must fit for mass_A, mass_B, mass_C, age, feh, dist,'+
                                     'A_V if apparent magnitudes provided.')
                # Combined light of all three components.
                mods = self.ic.mag[prop]([mass_A, mass_B, mass_C],
                                         age, feh) + 5*np.log10(dist) - 5
                A = AV*EXTINCTION[prop]
                mods += A
                mod = addmags(*mods)
            elif re.search(r'delta_', prop):
                # delta_<band> constraints are not implemented for triples.
                continue
            elif prop=='feh':
                mod = feh
            elif prop=='parallax':
                # Parallax in mas for distance in pc.
                mod = 1./dist * 1000
            else:
                mod = getattr(self.ic,prop)(mass_A,age,feh)
            logl += -(val-mod)**2/(2*err**2) + np.log(1/(err*np.sqrt(2*np.pi)))

        if np.isnan(logl):
            logl = -np.inf

        return logl

    def lnprior(self, mass_A, mass_B, mass_C, age, feh,
                distance=None, AV=None, use_local_fehprior=True):
        """Log-prior: single-star prior on the primary, plus two mass-ratio priors."""
        lnpr = super(TripleStarModel,self).lnprior(mass_A, age, feh, distance, AV,
                                                   use_local_fehprior=use_local_fehprior)
        q1 = mass_B / mass_A
        q2 = mass_C / mass_B
        lnpr += np.log(q_prior(q1, mass_A))
        lnpr += np.log(q_prior(q2, mass_B))
        return lnpr

    def lnpost(self, p, use_local_fehprior=True):
        """Log-posterior (log-likelihood plus log-prior) at parameters ``p``."""
        if not self.use_emcee:
            mass_A,mass_B,mass_C,age,feh,dist,AV = (p[0], p[1], p[2],
                                                    p[3], p[4], p[5],
                                                    p[6])
        else:
            if len(p)==7:
                fit_for_distance = True
                mass_A,mass_B,mass_C,age,feh,dist,AV = p
            elif len(p)==5:
                fit_for_distance = False
                mass_A,mass_B,mass_C,age,feh = p
                dist = None
                AV = None

        return (self.lnlike(p) +
                self.lnprior(mass_A, mass_B, mass_C, age, feh, dist, AV,
                             use_local_fehprior=use_local_fehprior))

    def maxlike(self,nseeds=50):
        """Returns the best-fit parameters, choosing the best of multiple starting guesses

        :param nseeds: (optional)
            Number of starting guesses, uniformly distributed throughout
            allowed ranges. Default=50.

        :return:
            list of best-fit parameters: ``[mA,mB,mC,age,feh,[distance,A_V]]``.
            Note that distance and A_V values will be meaningless unless
            magnitudes are present in ``self.properties``.
        """
        mA_0,age0,feh0 = self.ic.random_points(nseeds)
        mB_0,foo1,foo2 = self.ic.random_points(nseeds)
        mC_0,foo3,foo4 = self.ic.random_points(nseeds)
        # Sort so mass_A >= mass_B >= mass_C in the starting guesses.
        m_all = np.sort(np.array([mA_0, mB_0, mC_0]), axis=0)
        mA_0, mB_0, mC_0 = (m_all[0,:], m_all[1,:], m_all[2,:])

        d0 = 10**(rand.uniform(0,np.log10(self.max_distance),size=nseeds))
        AV0 = rand.uniform(0,self.maxAV,size=nseeds)

        costs = np.zeros(nseeds)

        if self.fit_for_distance:
            pfits = np.zeros((nseeds,7))
        else:
            pfits = np.zeros((nseeds,5))

        def fn(p): #fmin is a function *minimizer*
            return -1*self.lnpost(p)

        for i,mA,mB,mC,age,feh,d,AV in zip(range(nseeds),
                                           mA_0,mB_0,mC_0,age0,feh0,d0,AV0):
            if self.fit_for_distance:
                pfit = scipy.optimize.fmin(fn,[mA,mB,mC,age,feh,d,AV],disp=False)
            else:
                pfit = scipy.optimize.fmin(fn,[mA,mB,mC,age,feh],disp=False)
            pfits[i,:] = pfit
            costs[i] = self.lnpost(pfit)

        return pfits[np.argmax(costs),:]

    def mnest_prior(self, cube, ndim, nparams):
        """Map the MultiNest unit hypercube onto physical parameter ranges.

        Masses are conditioned on each other (``cube[1]`` on ``cube[0]``,
        ``cube[2]`` on ``cube[1]``) so mass_A >= mass_B >= mass_C holds.
        """
        cube[0] = (self.ic.maxmass - self.ic.minmass)*cube[0] + self.ic.minmass
        cube[1] = (cube[0] - self.ic.minmass)*cube[1] + self.ic.minmass
        cube[2] = (cube[1] - self.ic.minmass)*cube[2] + self.ic.minmass
        cube[3] = (self.ic.maxage - self.ic.minage)*cube[3] + self.ic.minage
        cube[4] = (self.ic.maxfeh - self.ic.minfeh)*cube[4] + self.ic.minfeh
        cube[5] = cube[5]*self.max_distance
        cube[6] = cube[6]*self.maxAV

    def fit_multinest(self, basename='chains/triple-', **kwargs):
        """Same as :func:`StarModel.fit_multinest`, with a triple-specific default basename."""
        super(TripleStarModel, self).fit_multinest(basename=basename, **kwargs)

    def fit_mcmc(self,nwalkers=200,nburn=100,niter=200,
                 p0=None,initial_burn=None,
                 ninitial=100, loglike_kwargs=None,
                 **kwargs):
        """Fits stellar model using MCMC.

        See :func:`StarModel.fit_mcmc`.
        """
        #clear any saved _samples
        if self._samples is not None:
            self._samples = None

        if self.fit_for_distance:
            npars = 7
            if initial_burn is None:
                initial_burn = True
        else:
            if initial_burn is None:
                initial_burn = False
            npars = 5

        if p0 is None:
            mA_0,age0,feh0 = self.ic.random_points(nwalkers)
            mB_0,foo1,foo2 = self.ic.random_points(nwalkers)
            mC_0,foo3,foo4 = self.ic.random_points(nwalkers)
            # Sort so mass_A >= mass_B >= mass_C in the initial positions.
            m_all = np.sort(np.array([mA_0, mB_0, mC_0]), axis=0)
            mA_0, mB_0, mC_0 = (m_all[0,:], m_all[1,:], m_all[2,:])
            d0 = 10**(rand.uniform(0,np.log10(self.max_distance),size=nwalkers))
            AV0 = rand.uniform(0,self.maxAV,size=nwalkers)
            if self.fit_for_distance:
                p0 = np.array([mA_0,mB_0,mC_0,age0,feh0,d0,AV0]).T
            else:
                p0 = np.array([mA_0,mB_0,mC_0,age0,feh0]).T
            if initial_burn:
                sampler = emcee.EnsembleSampler(nwalkers,npars,self.lnpost,
                                                **kwargs)
                #ninitial = 300 #should this be parameter?
                pos, prob, state = sampler.run_mcmc(p0, ninitial)

                # Keep only walkers with a healthy acceptance rate,
                # relaxing the lower threshold until at least one passes.
                wokinds = np.where((sampler.naccepted/ninitial > 0.15) &
                                   (sampler.naccepted/ninitial < 0.4))[0]
                i=1
                while len(wokinds)==0:
                    thresh = 0.15 - i*0.02
                    if thresh < 0:
                        raise RuntimeError('Initial burn has no acceptance?')
                    wokinds = np.where((sampler.naccepted/ninitial > thresh) &
                                       (sampler.naccepted/ninitial < 0.4))[0]
                    i += 1
                inds = rand.randint(len(wokinds),size=nwalkers)
                p0 = sampler.chain[wokinds[inds],:,:].mean(axis=1) #reset p0
                p0 *= (1 + rand.normal(size=p0.shape)*0.01)
        else:
            p0 = np.array(p0)
            p0 = rand.normal(size=(nwalkers,npars))*0.01 + p0.T[None,:]
            if self.fit_for_distance:
                p0[:,5] *= (1 + rand.normal(size=nwalkers)*0.5) #distance

        sampler = emcee.EnsembleSampler(nwalkers,npars,self.lnpost)
        pos, prob, state = sampler.run_mcmc(p0, nburn)

        sampler.reset()
        sampler.run_mcmc(pos, niter, rstate0=state)

        self._sampler = sampler
        return sampler

    def triangle_plots(self, basename=None, format='png',
                       **kwargs):
        """Returns two triangle plots, one with physical params, one observational

        :return:
            * Physical parameters triangle plot (mass_A, mass_B, mass_C, radius,
              Teff, feh, age, distance)
            * Observed properties triangle plot.
        """
        fig1 = self.triangle(plot_datapoints=False,
                             params=['mass_A', 'mass_B', 'mass_C', 'radius',
                                     'Teff','logg','feh','age','distance','AV'],
                             **kwargs)
        if basename is not None:
            plt.savefig('{}_physical.{}'.format(basename,format))
            plt.close()
        fig2 = self.prop_triangle(**kwargs)
        if basename is not None:
            plt.savefig('{}_observed.{}'.format(basename,format))
            plt.close()
        return fig1, fig2

    def triangle(self, params=None, **kwargs):
        """
        Makes a nifty corner plot.

        :param params: (optional)
            Columns to plot; defaults to the MCMC fit parameters.

        :return:
            Figure object containing corner plot.
        """
        if params is None:
            params = ['mass_A', 'mass_B', 'mass_C',
                      'age', 'feh', 'distance', 'AV']
        # Bug fix: the parent's figure was previously dropped (no return),
        # so triangle_plots always received None for fig1.
        return super(TripleStarModel, self).triangle(params=params, **kwargs)

    def _make_samples(self):
        """Build the samples DataFrame; combined magnitudes plus per-component columns."""
        if not self.use_emcee:
            chain = np.loadtxt('{}post_equal_weights.dat'.format(self._mnest_basename))
            #for purposes of unit test, sometimes there will be 1-length chain...
            if chain.ndim==1:
                chain = np.array([chain])
            mass_A = chain[:,0]
            mass_B = chain[:,1]
            mass_C = chain[:,2]
            age = chain[:,3]
            feh = chain[:,4]
            distance = chain[:,5]
            AV = chain[:,6]
            lnprob = chain[:,-1]
        else:
            # Only walkers with a reasonable acceptance fraction.
            ok_walkers = self.sampler.acceptance_fraction > 0.15

            mass_A = self.sampler.chain[ok_walkers,:,0].ravel()
            mass_B = self.sampler.chain[ok_walkers,:,1].ravel()
            mass_C = self.sampler.chain[ok_walkers,:,2].ravel()
            age = self.sampler.chain[ok_walkers,:,3].ravel()
            feh = self.sampler.chain[ok_walkers,:,4].ravel()

            if self.fit_for_distance:
                distance = self.sampler.chain[ok_walkers,:,5].ravel()
                AV = self.sampler.chain[ok_walkers,:,6].ravel()
            else:
                distance = None
                AV = 0

            lnprob = self.sampler.lnprobability[ok_walkers,:].ravel()

        df = self.ic(mass_A, age, feh,
                     distance=distance, AV=AV)
        df_B = self.ic(mass_B, age, feh,
                       distance=distance, AV=AV)
        df_C = self.ic(mass_C, age, feh,
                       distance=distance, AV=AV)

        # Magnitude columns: keep per-component values and replace the
        # main column with the combined (summed-flux) magnitude.
        for col in df_B.columns:
            m = re.search(r'_mag$', col)
            if m:
                df['{}_A'.format(col)] = df[col]
                df['{}_B'.format(col)] = df_B[col]
                df['{}_C'.format(col)] = df_C[col]
                df[col] = addmags(df[col], df_B[col], df_C[col])

        df['mass_A'] = df['mass']
        df.drop('mass', axis=1, inplace=True)
        df['mass_B'] = df_B['mass']
        df['mass_C'] = df_C['mass']
        df['radius_B'] = df_B['radius']
        df['radius_C'] = df_C['radius']

        df['age'] = age
        df['feh'] = feh

        if self.fit_for_distance:
            df['distance'] = distance
            df['AV'] = AV

        df['lnprob'] = lnprob

        self._samples = df.copy()
#class MultipleStarModel(StarModel, BinaryStarModel, TripleStarModel):
# """
# StarModel where N_stars (1,2, or 3) is a parameter to estimate.
# """
# def binary_loglike(self, *args, **kwargs):
# return BinaryStarModel.loglike(self, *args, **kwargs)
# def triple_loglike(self, *args, **kwargs):
# return TripleStarModel.loglike(self, *args, **kwargs)
#### Utility functions #####
def addmags(*mags):
    """Combine magnitudes: convert each to flux, sum, convert back."""
    total_flux = sum(10**(-0.4*m) for m in mags)
    return -2.5*np.log10(total_flux)
def q_prior(q, m=1, gamma=0.3, qmin=0.1):
    """Default prior on mass ratio q ~ q^gamma

    Normalized over [qmin, 1]; zero outside that interval.
    """
    if not (qmin <= q <= 1):
        return 0
    # Normalization constant of q^gamma over [qmin, 1].
    norm = (gamma + 1) / (1 - qmin**(gamma + 1))
    return norm * q**gamma
def salpeter_prior(m,alpha=-2.35,minmass=0.1,maxmass=10):
    """Salpeter-style power-law mass prior ~ m^alpha, normalized over [minmass, maxmass]."""
    if minmass <= m <= maxmass:
        norm = (1+alpha)/(maxmass**(1+alpha)-minmass**(1+alpha))
        return norm*m**(alpha)
    return 0
def local_fehdist(feh):
    """[Fe/H] PDF based on the local SDSS distribution.

    Two-Gaussian fit based on Casagrande (2011), via Jo Bovy:
    https://github.com/jobovy/apogee/blob/master/apogee/util/__init__.py#L3
    """
    narrow = (0.8 / 0.15) * np.exp(-0.5 * (feh - 0.016) ** 2. / 0.15 ** 2.)
    broad = (0.2 / 0.22) * np.exp(-0.5 * (feh + 0.15) ** 2. / 0.22 ** 2.)
    return narrow + broad
| styra/isochrones | isochrones/starmodel.py | Python | mit | 66,539 | [
"Gaussian"
] | 67645aeb6a1fd69246cae8d4d1d8261555f070fb2a513834846f1f1ab30e25a8 |
# -*- coding: utf-8 -*-
"""Volumes of raw image and labeled object data."""
from __future__ import division
from collections import namedtuple
import csv
import logging
import os
import re
import h5py
import math
import numpy as np
from PIL import Image
import pytoml as toml
import requests
from scipy import ndimage
import six
from six.moves import range as xrange
import pyn5
from .config import CONFIG
from .octrees import OctreeVolume
from .util import get_nonzero_aabb
DimOrder = namedtuple('DimOrder', ('X', 'Y', 'Z'))
def partition_volumes(volumes, downsample=True):
    """Partition volumes into training and validation based on configuration.

    Uses the regexes mapping partition sizes and indices in
    diluvian.config.TrainingConfig by applying them to matching volumes based
    on name.

    Parameters
    ----------
    volumes : dict
        Dictionary mapping volume name to diluvian.volumes.Volume.
    downsample : bool, optional
        Whether to downsample partitions automatically.

    Returns
    -------
    training_volumes, validation_volumes : dict
        Dictionary mapping volume name to partitioned, downsampled volumes.
    """
    def matches(mapping, name):
        # Collect all values whose regex key matches this volume name.
        return [value for rgx, value in mapping.items() if re.match(rgx, name)]

    def apply_partitioning(vols, partitioning):
        partitioned = {}
        for name, vol in six.iteritems(vols):
            sizes = matches(CONFIG.training.partitions, name)
            indices = matches(partitioning, name)
            if len(sizes) > 1 or len(indices) > 1:
                raise ValueError('Volume "{}" matches more than one partition specifier'.format(name))
            if len(sizes) == 1 and len(indices) == 1:
                part = vol.partition(sizes[0], indices[0])
                if downsample:
                    part = part.downsample(CONFIG.volume.resolution)
                partitioned[name] = part
        return partitioned

    training_volumes = apply_partitioning(volumes, CONFIG.training.training_partition)
    validation_volumes = apply_partitioning(volumes, CONFIG.training.validation_partition)

    return training_volumes, validation_volumes
class SubvolumeBounds(object):
    """Sufficient parameters to extract a subvolume from a volume."""
    __slots__ = ('start', 'stop', 'seed', 'label_id', 'label_margin',)

    def __init__(self, start=None, stop=None, seed=None, label_id=None, label_margin=None):
        assert (start is not None and stop is not None) or seed is not None, "Bounds or seed must be provided"
        self.start = start
        self.stop = stop
        self.seed = seed
        self.label_id = label_id
        # Default to a zero margin on every axis.
        self.label_margin = np.zeros(3, dtype=np.int64) if label_margin is None else label_margin

    @classmethod
    def iterable_from_csv(cls, filename):
        """Read a list of bounds from a CSV file written by iterable_to_csv."""
        bounds = []
        with open(filename, 'r') as csvfile:
            for row in csv.DictReader(csvfile):
                for key, value in six.iteritems(row):
                    if not value:
                        row[key] = None
                    elif value.startswith('['):
                        # Bracketed fields are space-separated integer vectors.
                        row[key] = np.fromstring(value[1:-1], sep=' ', dtype=np.int64)
                    else:
                        row[key] = int(value)
                bounds.append(cls(**row))
        return bounds

    @classmethod
    def iterable_to_csv(cls, bounds, filename):
        """Write an iterable of bounds to a CSV file, one row per bounds."""
        with open(filename, 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(cls.__slots__)
            for bound in bounds:
                writer.writerow([getattr(bound, field) for field in cls.__slots__])
class Subvolume(object):
    """A subvolume of image data and an optional ground truth object mask."""
    __slots__ = ('image', 'label_mask', 'seed', 'label_id',)

    def __init__(self, image, label_mask, seed, label_id):
        self.image = image
        self.label_mask = label_mask
        self.seed = seed
        self.label_id = label_id

    def f_a(self):
        """Calculate the mask filling fraction of this subvolume.

        Returns
        -------
        float
            Fraction of the subvolume voxels in the object mask.
        """
        mask = self.label_mask
        return np.count_nonzero(mask) / float(mask.size)

    def has_seed_in_mask(self):
        """Return whether the seed voxel lies inside the label mask."""
        # The mask may be smaller than the image; shift the seed by the
        # per-axis offset between the two.
        offset = (np.asarray(self.image.shape) - np.asarray(self.label_mask.shape)) // 2
        return self.label_mask[tuple(self.seed - offset)]

    def has_uniform_seed_margin(self, seed_margin=20.0):
        """Test if a subvolume has a margin of uniform label around its seed.

        Parameters
        ----------
        seed_margin : float, optional
            The minimum acceptable margin of uniform target label around the
            seed voxel (in nm, default 20.0).

        Returns
        -------
        bool
            True if the rectangular margin around the seed position is uniform.
        """
        margin = np.ceil(np.reciprocal(np.array(CONFIG.volume.resolution),
                                       dtype=np.float64) * seed_margin).astype(np.int64)

        mask_target = self.label_mask
        # If data is unlabeled, can not test so always succeed.
        if mask_target is None:
            return True
        # Seed location in the mask accounting for offset of label from image.
        ctr = self.seed - (np.asarray(self.image.shape) - np.asarray(mask_target.shape)) // 2
        lower = ctr - margin
        upper = ctr + margin + 1
        seed_region = mask_target[lower[0]:upper[0],
                                  lower[1]:upper[1],
                                  lower[2]:upper[2]]
        return np.all(seed_region)
class SubvolumeGenerator(six.Iterator):
    """Iterator combining a volume and a subvolume bounds generator.

    Parameters
    ----------
    volume : Volume
    bounds_generator : SubvolumeBoundsGenerator
    """
    def __init__(self, volume, bounds_generator):
        self.volume = volume
        self.bounds_generator = bounds_generator

    @property
    def shape(self):
        return self.bounds_generator.shape

    def __iter__(self):
        return self

    def reset(self):
        self.bounds_generator.reset()

    def __next__(self):
        next_bounds = six.next(self.bounds_generator)
        return self.volume.get_subvolume(next_bounds)
class ErodedMaskGenerator(six.Iterator):
    """Wraps a subvolume generator, eroding each subvolume's label mask.

    Subvolumes whose seed no longer lies inside the eroded mask are
    discarded and the next candidate subvolume is drawn instead.
    """
    def __init__(self, subvolume_generator, erosion_px):
        # erosion_px: per-axis erosion half-width; the structuring element
        # spans (2 * erosion_px + 1) voxels.
        # NOTE(review): np.ones(erosion_px * 2 + 1) yields a 1-D structuring
        # element if erosion_px is a scalar; presumably callers pass a
        # length-3 array so it matches the 3-D mask -- confirm at call sites.
        self.subvolume_generator = subvolume_generator
        self.sel = np.ones(erosion_px * 2 + 1)

    @property
    def shape(self):
        return self.subvolume_generator.shape

    def __iter__(self):
        return self

    def reset(self):
        self.subvolume_generator.reset()

    def __next__(self):
        # Draw subvolumes until one survives erosion with its seed in-mask.
        while True:
            subv = six.next(self.subvolume_generator)

            # border_value=1 treats voxels beyond the subvolume boundary as
            # set, so erosion does not eat inward from the edges.
            subv.label_mask = ndimage.binary_erosion(subv.label_mask, structure=self.sel, border_value=1)

            if subv.has_seed_in_mask():
                return subv
class RelabelSeedComponentGenerator(six.Iterator):
    """Wraps a subvolume generator, restricting each mask to the connected
    component that contains the seed voxel.
    """
    def __init__(self, subvolume_generator):
        self.subvolume_generator = subvolume_generator

    @property
    def shape(self):
        return self.subvolume_generator.shape

    def __iter__(self):
        return self

    def reset(self):
        self.subvolume_generator.reset()

    def __next__(self):
        subv = six.next(self.subvolume_generator)
        components, _ = ndimage.label(subv.label_mask)
        # Offset of the (possibly smaller) label mask inside the image.
        margin = (np.array(subv.image.shape) - np.array(subv.label_mask.shape)) // 2
        seed_component = components[tuple(subv.seed - margin)]
        subv.label_mask = components == seed_component
        return subv
class SubvolumeAugmentGenerator(six.Iterator):
    """Base class for subvolume generator augmenters.

    Parameters
    ----------
    subvolume_generator : SubvolumeGenerator
    return_both : bool
        If true, return both the original and augmented volume in sequence.
        If false, return either with equal probability.
    """
    def __init__(self, subvolume_generator, return_both):
        self.subvolume_generator = subvolume_generator
        self.return_both = return_both
        # Probability of yielding the unaugmented subvolume when
        # return_both is False.
        self.return_single_p = 0.5
        # Pending subvolume: in return_both mode the original is held here
        # between the "original" yield and the following "augmented" yield.
        self.subvolume = None

    @property
    def shape(self):
        return self.subvolume_generator.shape

    def __iter__(self):
        return self

    def reset(self):
        self.subvolume = None
        self.subvolume_generator.reset()

    def __next__(self):
        if self.return_both:
            if self.subvolume is None:
                # First call of the pair: fetch and return the original.
                self.subvolume = six.next(self.subvolume_generator)
                return self.subvolume
            else:
                # Second call of the pair: return the augmented version, or
                # recurse to the next pair if no augmentation applied.
                subv = self.augment_subvolume()
                self.subvolume = None

                if subv is None:
                    return six.next(self)
                else:
                    return subv
        else:
            self.subvolume = six.next(self.subvolume_generator)
            if np.random.sample() < self.return_single_p:
                return self.subvolume
            else:
                subv = self.augment_subvolume()
                # Fall back to the original if no augmentation applied.
                if subv is None:
                    return self.subvolume
                else:
                    return subv

    def augment_subvolume(self):
        """Return an augmented copy of ``self.subvolume``, or None if no
        augmentation is applicable. Subclasses must implement this."""
        raise NotImplementedError('Subclasses must implement this method.')
class ClipSubvolumeImageGenerator(six.Iterator):
    """Clip subvolume image intensities to a fixed range (default [0, 1]).

    Useful to apply after a sequence of augmentations.

    Parameters
    ----------
    subvolume_generator : SubvolumeGenerator
    min_val, max_val : float, optional
    """
    def __init__(self, subvolume_generator, min_val=0.0, max_val=1.0):
        self.subvolume_generator = subvolume_generator
        self.min_val = min_val
        self.max_val = max_val

    @property
    def shape(self):
        return self.subvolume_generator.shape

    def __iter__(self):
        return self

    def reset(self):
        self.subvolume_generator.reset()

    def __next__(self):
        subv = six.next(self.subvolume_generator)
        clipped_image = np.clip(subv.image, self.min_val, self.max_val)
        return Subvolume(clipped_image, subv.label_mask, subv.seed, subv.label_id)
class MirrorAugmentGenerator(SubvolumeAugmentGenerator):
    """Repeats subvolumes from a subvolume generator mirrored along an axis.

    For each subvolume in the original generator, this generator will yield
    two subvolumes: the original subvolume and the subvolume with the image,
    label mask, and seed mirrored along a given axis.

    Parameters
    ----------
    subvolume_generator : SubvolumeGenerator
    return_both : bool
        If true, return both the original and augmented volume in sequence.
        If false, return either with equal probability.
    axis : int
    """
    def __init__(self, subvolume_generator, return_both, axis):
        super(MirrorAugmentGenerator, self).__init__(subvolume_generator, return_both)
        self.axis = axis

    def augment_subvolume(self):
        original = self.subvolume
        axis_len = original.image.shape[self.axis]
        # Reflect the seed position across the mirrored axis.
        mirrored_seed = original.seed.copy()
        mirrored_seed[self.axis] = axis_len - original.seed[self.axis] - 1
        mirrored_mask = None
        if original.label_mask is not None:
            mirrored_mask = np.flip(original.label_mask, self.axis)
        return Subvolume(np.flip(original.image, self.axis),
                         mirrored_mask,
                         mirrored_seed,
                         original.label_id)
class PermuteAxesAugmentGenerator(SubvolumeAugmentGenerator):
    """Repeats subvolumes from a subvolume generator with an axes permutation.

    For each subvolume in the original generator, this generator will yield
    two subvolumes: the original subvolume and the subvolume with the image,
    label mask, and seed axes permuted according to a given axes order.

    Parameters
    ----------
    subvolume_generator : SubvolumeGenerator
    return_both : bool
        If true, return both the original and augmented volume in sequence.
        If false, return either with equal probability.
    axes : sequence of int
    """
    def __init__(self, subvolume_generator, return_both, axes):
        super(PermuteAxesAugmentGenerator, self).__init__(subvolume_generator, return_both)
        self.axes = list(axes)

    def augment_subvolume(self):
        original = self.subvolume
        permuted_mask = None
        if original.label_mask is not None:
            permuted_mask = np.transpose(original.label_mask, self.axes)
        return Subvolume(np.transpose(original.image, self.axes),
                         permuted_mask,
                         original.seed[self.axes],
                         self.subvolume.label_id)
class MissingDataAugmentGenerator(SubvolumeAugmentGenerator):
    """Repeats subvolumes from a subvolume generator with missing data planes.

    For each subvolume in the original generator, this generator will yield the
    original subvolume and may yield a subvolume with missing planes of image
    and/or label mask data.

    Parameters
    ----------
    subvolume_generator : SubvolumeGenerator
    return_both : bool
        If true, return both the original and augmented volume in sequence.
        If false, return either with equal probability.
    axis : int
    probability : float
        Independent probability that each plane of data along axis is missing.
    remove_label : bool
        Whether to also remove label mask data.
    """
    def __init__(self, subvolume_generator, return_both, axis, probability, remove_label=False):
        super(MissingDataAugmentGenerator, self).__init__(subvolume_generator, return_both)
        self.axis = axis
        self.probability = probability
        self.remove_label = remove_label

    def augment_subvolume(self):
        rolls = np.random.sample(self.shape[self.axis])
        # Remove the seed plane from possibilities.
        rolls[self.subvolume.seed[self.axis]] = 1.1
        missing_sections = np.where(rolls < self.probability)
        if missing_sections and missing_sections[0].size:
            subv = self.subvolume
            # Only copy the mask when it will be mutated below.
            mask = subv.label_mask.copy() if subv.label_mask is not None and self.remove_label else subv.label_mask
            subv = Subvolume(subv.image.copy(),
                             mask,
                             subv.seed,
                             subv.label_id)
            # BUGFIX: index with a tuple; indexing an ndarray with a list of
            # slices is deprecated and an error in modern NumPy.
            slices = [slice(None)] * 3
            slices[self.axis] = missing_sections[0]
            subv.image[tuple(slices)] = 0

            if self.remove_label:
                # The label mask may be smaller than the image; shift the
                # missing section indices into mask coordinates and keep
                # only those falling inside the mask.
                label_axis_margin = (subv.image.shape[self.axis] - subv.label_mask.shape[self.axis]) // 2
                label_sections = missing_sections[0] - label_axis_margin
                label_sections = label_sections[(label_sections >= 0) &
                                                (label_sections < subv.label_mask.shape[self.axis])]
                slices[self.axis] = label_sections
                subv.label_mask[tuple(slices)] = False

            return subv
        else:
            # No augmentations to be made. Superclass will automatically return
            # next subvolume.
            return None
class GaussianNoiseAugmentGenerator(SubvolumeAugmentGenerator):
    """Repeats subvolumes from a subvolume generator with Gaussian noise.

    For each subvolume in the original generator, this generator will yield
    two subvolumes: the original subvolume and the subvolume with
    multiplicative and additive Gaussian noise applied to the image data.

    Parameters
    ----------
    subvolume_generator : SubvolumeGenerator
    return_both : bool
        If true, return both the original and augmented volume in sequence.
        If false, return either with equal probability.
    axis : int
        Axis along which noise will be applied independently. For example,
        0 will apply different noise to each z-section. -1 will apply
        uniform noise to the entire subvolume.
    multiplicative : float
        Standard deviation for 1-mean Gaussian multiplicative noise.
    additive : float
        Standard deviation for 0-mean Gaussian additive noise.
    """
    def __init__(self, subvolume_generator, return_both, axis, multiplicative, additive):
        super(GaussianNoiseAugmentGenerator, self).__init__(subvolume_generator, return_both)
        self.axis = axis
        self.multiplicative = multiplicative
        self.additive = additive

    def augment_subvolume(self):
        subv = self.subvolume
        # Shape that broadcasts a per-section noise vector along the
        # chosen axis (identity along the others).
        broadcast_shape = np.ones(3, dtype=np.int32)
        broadcast_shape[self.axis] = -1
        noise_len = 1 if self.axis == -1 else self.shape[self.axis]
        gain = np.random.normal(1.0, self.multiplicative, noise_len).astype(subv.image.dtype)
        offset = np.random.normal(0.0, self.additive, noise_len).astype(subv.image.dtype)
        noisy_image = subv.image * gain.reshape(broadcast_shape) + offset.reshape(broadcast_shape)
        return Subvolume(noisy_image,
                         subv.label_mask,
                         subv.seed,
                         subv.label_id)
class ContrastAugmentGenerator(SubvolumeAugmentGenerator):
    """Repeats subvolumes from a subvolume generator with altered contrast.

    For each subvolume in the original generator, this generator will yield the
    original subvolume and may yield a subvolume with image intensity contrast.

    Currently this augmentation performs simple rescaling of intensity values,
    not histogram based methods. This simple approach still yields results
    resembling TEM artifacts. A single rescaling is chosen for all selected
    sections in each subvolume, not independently per selected section.

    Parameters
    ----------
    subvolume_generator : SubvolumeGenerator
    return_both : bool
        If true, return both the original and augmented volume in sequence.
        If false, return either with equal probability.
    axis : int
        Axis along which contrast may be altered. For example, 0 will alter
        contrast by z-sections.
    probability : float
        Independent probability that each plane of data along axis is altered.
    scaling_mean, scaling_std, center_mean, center_std : float
        Normal distribution parameters for the rescaling of intensity values.
    """
    def __init__(self, subvolume_generator, return_both, axis, probability,
                 scaling_mean, scaling_std, center_mean, center_std):
        super(ContrastAugmentGenerator, self).__init__(subvolume_generator, return_both)
        self.axis = axis
        self.probability = probability
        self.scaling_mean = scaling_mean
        self.scaling_std = scaling_std
        self.center_mean = center_mean
        self.center_std = center_std

    def augment_subvolume(self):
        rolls = np.random.sample(self.shape[self.axis])
        sections = np.where(rolls < self.probability)
        if sections and sections[0].size:
            subv = self.subvolume
            subv = Subvolume(subv.image.copy(),
                             subv.label_mask,
                             subv.seed,
                             subv.label_id)
            # BUGFIX: index with a tuple; indexing an ndarray with a list of
            # slices is deprecated and an error in modern NumPy.
            slices = [slice(None)] * 3
            slices[self.axis] = sections[0]
            index = tuple(slices)
            data = subv.image[index]
            old_min = data.min()
            old_max = data.max()
            # Draw one affine rescaling and apply it to all selected sections.
            scaling = np.random.normal(self.scaling_mean, self.scaling_std)
            center = np.random.normal(self.center_mean, self.center_std)
            data = scaling*(data - old_min) + 0.5*scaling*center*(old_max - old_min) + old_min
            subv.image[index] = data
            return subv
        else:
            return None
class MaskedArtifactAugmentGenerator(SubvolumeAugmentGenerator):
    """Repeats subvolumes from a subvolume generator with artifact data added.

    For each subvolume in the original generator, this generator will yield the
    original subvolume and may yield a subvolume with planes of image mixed
    with artifact data from a separate volume.

    Parameters
    ----------
    subvolume_generator : SubvolumeGenerator
    return_both : bool
        If true, return both the original and augmented volume in sequence.
        If false, return either with equal probability.
    axis : int
    probability : float
        Independent probability that each plane of data along axis has
        artifacts.
    artifact_volume_file : string
        Filename of an TOML descriptor of an HDF5 dataset with image and mask
        data channels. Only the dataset named 'Artifacts' from this descriptor
        will be used. Mask data should be a float that will be interpreted
        as an alpha for blending image data from this artifact file with
        the original subvolume image data.
    cache : dict
        Shared cache so the artifact volume is loaded only once across
        generator instances.
    """
    def __init__(self, subvolume_generator, return_both, axis, probability, artifact_volume_file, cache):
        super(MaskedArtifactAugmentGenerator, self).__init__(subvolume_generator, return_both)
        self.axis = axis
        self.probability = probability
        if 'artifacts' not in cache:
            vol = HDF5Volume.from_toml(artifact_volume_file)['Artifacts']
            cache['mask'] = NdarrayVolume(
                    vol.world_coord_to_local(vol.resolution),
                    image_data=vol.world_mat_to_local(vol.mask_data[:]))
            vol.mask_data = None
            cache['artifacts'] = vol.to_memory_volume()
        self.mask = cache['mask']
        self.artifacts = cache['artifacts']
        # Artifact patches are single planes orthogonal to the chosen axis.
        artifact_shape = self.shape.copy()
        artifact_shape[self.axis] = 1
        self.art_bounds_gen = self.artifacts.subvolume_bounds_generator(shape=artifact_shape)

    def augment_subvolume(self):
        rolls = np.random.sample(self.shape[self.axis])
        artifact_sections = np.where(rolls < self.probability)
        if artifact_sections and artifact_sections[0].size:
            subv = self.subvolume
            subv = Subvolume(subv.image.copy(),
                             subv.label_mask,
                             subv.seed,
                             subv.label_id)
            # BUGFIX: index with a tuple; indexing an ndarray with a list
            # (of slices and an int) is deprecated/removed in modern NumPy.
            slices = [slice(None)] * 3
            for z in artifact_sections[0]:
                slices[self.axis] = z
                index = tuple(slices)
                mask_found = False
                # Since artifact data is usually sparse, reject patches
                # that have all zero mask.
                while not mask_found:
                    art_bounds = six.next(self.art_bounds_gen)
                    mask = self.mask.get_subvolume(art_bounds).image
                    if mask.max() == 0.0:
                        continue
                    mask_found = True
                art = self.artifacts.get_subvolume(art_bounds).image
                # Alpha-blend the artifact plane over the raw data.
                raw = subv.image[index]
                subv.image[index] = raw * (1.0 - mask) + art * mask
            return subv
        else:
            return None
class Volume(object):
    """Base class for volumes of image data with optional label and mask data.

    Provides local/world coordinate transforms (identity for the base class),
    subvolume extraction, and construction of partitioned, downsampled, and
    sparse-cached views.
    """
    DIM = DimOrder(Z=0, Y=1, X=2)

    def __init__(self, resolution, image_data=None, label_data=None, mask_data=None):
        self.resolution = resolution
        self.image_data = image_data
        self.label_data = label_data
        self.mask_data = mask_data
        # Lazily-computed AABB of the nonzero mask (see mask_bounds).
        self._mask_bounds = None

    def local_coord_to_world(self, a):
        return a

    def world_coord_to_local(self, a):
        return a

    def world_mat_to_local(self, m):
        return m

    @property
    def mask_bounds(self):
        """Cached (min, max) axis-aligned bounding box of the nonzero mask,
        or None if there is no mask channel."""
        if self._mask_bounds is not None:
            return self._mask_bounds
        if self.mask_data is None:
            return None
        # Explicitly copy the channel to memory. 3x speedup for np ops.
        mask_data = self.mask_data[:]
        self._mask_bounds = get_nonzero_aabb(mask_data)

        return self._mask_bounds

    @property
    def shape(self):
        return tuple(self.world_coord_to_local(np.array(self.image_data.shape)))

    def _get_downsample_from_resolution(self, resolution):
        # Convert a target resolution into integral per-axis zoom levels,
        # validating it is a power-of-2 coarsening of this volume.
        resolution = np.asarray(resolution)
        downsample = np.log2(np.true_divide(resolution, self.resolution))
        if np.any(downsample < 0):
            raise ValueError('Requested resolution ({}) is higher than volume resolution ({}). '
                             'Upsampling is not supported.'.format(resolution, self.resolution))
        if not np.all(np.equal(np.mod(downsample, 1), 0)):
            raise ValueError('Requested resolution ({}) is not a power-of-2 downsample of '
                             'volume resolution ({}). '
                             'This is currently unsupported.'.format(resolution, self.resolution))
        return downsample.astype(np.int64)

    def downsample(self, resolution):
        """Return a view of this volume downsampled to ``resolution``
        (or the volume itself if no downsampling is needed)."""
        downsample = self._get_downsample_from_resolution(resolution)
        if np.all(np.equal(downsample, 0)):
            return self
        return DownsampledVolume(self, downsample)

    def partition(self, partitioning, partition_index):
        """Return a view of one partition of this volume
        (or the volume itself for the trivial partitioning)."""
        if np.array_equal(partitioning, np.ones(3)) and np.array_equal(partition_index, np.zeros(3)):
            return self
        return PartitionedVolume(self, partitioning, partition_index)

    def sparse_wrapper(self, *args):
        return SparseWrappedVolume(self, *args)

    def subvolume_bounds_generator(self, shape=None, label_margin=None):
        return self.SubvolumeBoundsGenerator(self, shape, label_margin)

    def subvolume_generator(self, bounds_generator=None, **kwargs):
        if bounds_generator is None:
            if not kwargs:
                raise ValueError('Bounds generator arguments must be provided if no bounds generator is provided.')
            bounds_generator = self.subvolume_bounds_generator(**kwargs)
        return SubvolumeGenerator(self, bounds_generator)

    def get_subvolume(self, bounds):
        """Extract a Subvolume for the given SubvolumeBounds.

        Raises
        ------
        ValueError
            If the bounds do not have explicit start and stop coordinates.
        """
        if bounds.start is None or bounds.stop is None:
            raise ValueError('This volume does not support sparse subvolume access.')

        image_subvol = self.image_data[
                bounds.start[0]:bounds.stop[0],
                bounds.start[1]:bounds.stop[1],
                bounds.start[2]:bounds.stop[2]]
        image_subvol = self.world_mat_to_local(image_subvol)

        # Normalize integer image data to floats in [0, 1).
        if np.issubdtype(image_subvol.dtype, np.integer):
            image_subvol = image_subvol.astype(np.float32) / 256.0

        seed = bounds.seed
        if seed is None:
            seed = np.array(image_subvol.shape, dtype=np.int64) // 2

        if self.label_data is not None:
            # The label region is inset from the image region by the margin.
            label_start = bounds.start + bounds.label_margin
            label_stop = bounds.stop - bounds.label_margin

            label_subvol = self.label_data[
                    label_start[0]:label_stop[0],
                    label_start[1]:label_stop[1],
                    label_start[2]:label_stop[2]]
            label_subvol = self.world_mat_to_local(label_subvol)

            label_id = bounds.label_id
            if label_id is None:
                # Default to the label at the seed voxel.
                label_id = label_subvol[tuple(seed - bounds.label_margin)]
            label_mask = label_subvol == label_id
        else:
            label_mask = None
            label_id = None

        return Subvolume(image_subvol, label_mask, seed, label_id)

    class SubvolumeBoundsGenerator(six.Iterator):
        """Generate random subvolume bounds of a fixed shape, constrained to
        lie within the volume (and, if present, within its mask AABB)."""
        def __init__(self, volume, shape, label_margin=None):
            self.volume = volume
            self.shape = shape
            self.margin = np.floor_divide(self.shape, 2).astype(np.int64)
            if label_margin is None:
                label_margin = np.zeros(3, dtype=np.int64)
            self.label_margin = label_margin
            self.skip_blank_sections = True
            # Valid range for subvolume center voxels.
            self.ctr_min = self.margin
            self.ctr_max = (np.array(self.volume.shape) - self.margin - 1).astype(np.int64)
            self.random = np.random.RandomState(CONFIG.random_seed)

            # If the volume has a mask channel, further limit ctr_min and
            # ctr_max to lie inside a margin in the AABB of the mask.
            if self.volume.mask_data is not None:
                mask_min, mask_max = self.volume.mask_bounds
                mask_min = self.volume.world_coord_to_local(mask_min)
                mask_max = self.volume.world_coord_to_local(mask_max)
                self.ctr_min = np.maximum(self.ctr_min, mask_min + self.label_margin)
                self.ctr_max = np.minimum(self.ctr_max, mask_max - self.label_margin - 1)

            if np.any(self.ctr_min >= self.ctr_max):
                raise ValueError('Cannot generate subvolume bounds: bounds ({}, {}) too small for shape ({})'.format(
                    np.array_str(self.ctr_min), np.array_str(self.ctr_max), np.array_str(self.shape)))

        def __iter__(self):
            return self

        def reset(self):
            self.random.seed(0)

        def __next__(self):
            # Rejection-sample until a valid subvolume center is found.
            while True:
                ctr = np.array([self.random.randint(self.ctr_min[n], self.ctr_max[n])
                                for n in range(3)]).astype(np.int64)
                start = ctr - self.margin
                stop = ctr + self.margin + np.mod(self.shape, 2).astype(np.int64)

                # If the volume has a mask channel, only accept subvolumes
                # entirely contained in it.
                if self.volume.mask_data is not None:
                    start_local = self.volume.world_coord_to_local(start + self.label_margin)
                    stop_local = self.volume.world_coord_to_local(stop - self.label_margin)
                    mask = self.volume.mask_data[
                            start_local[0]:stop_local[0],
                            start_local[1]:stop_local[1],
                            start_local[2]:stop_local[2]]
                    if not mask.all():
                        logging.debug('Skipping subvolume not entirely in mask.')
                        continue

                # Skip subvolumes with seeds in blank sections.
                if self.skip_blank_sections and self.volume.image_data is not None:
                    if self.volume.image_data[tuple(self.volume.local_coord_to_world(ctr))] == 0:
                        logging.debug('Skipping subvolume with seed in blank section.')
                        continue

                # Only accept subvolumes where the central seed voxel will be
                # of a uniform label after downsampling. For more stringent
                # seed region uniformity filtering, see has_uniform_seed_margin.
                if self.volume.label_data is None:
                    label_id = None
                    break
                seed_min = self.volume.local_coord_to_world(ctr)
                seed_max = self.volume.local_coord_to_world(ctr + 1)
                label_ids = self.volume.label_data[
                        seed_min[0]:seed_max[0],
                        seed_min[1]:seed_max[1],
                        seed_min[2]:seed_max[2]]
                if (label_ids == label_ids.item(0)).all():
                    label_id = label_ids.item(0)
                    break

            return SubvolumeBounds(start, stop, label_id=label_id, label_margin=self.label_margin)
class NdarrayVolume(Volume):
    """A NumPy ndarray-backed volume.

    Since all volumes assume image and label data are ndarray-like, this class
    exists mostly as a bookkeeping convenience to make actual ndarray volumes
    explicit.
    """
    def __init__(self, *args, **kwargs):
        super(NdarrayVolume, self).__init__(*args, **kwargs)
        # Volumes are read-only sources: freeze the arrays so accidental
        # mutation fails loudly.
        self.image_data.flags.writeable = False
        if self.label_data is not None:
            self.label_data.flags.writeable = False
class VolumeView(Volume):
    """A volume that delegates to a parent volume through a coordinate map.

    Subclasses override ``local_to_parent``/``parent_to_local`` to implement
    shifted or scaled views; the base class is an identity view.
    """
    def __init__(self, parent, *args, **kwargs):
        super(VolumeView, self).__init__(*args, **kwargs)
        self.parent = parent

    def local_to_parent(self, a):
        return a

    def parent_to_local(self, a):
        return a

    def local_coord_to_world(self, a):
        return self.parent.local_coord_to_world(self.local_to_parent(a))

    def world_coord_to_local(self, a):
        return self.parent_to_local(self.parent.world_coord_to_local(a))

    def world_mat_to_local(self, m):
        return self.parent.world_mat_to_local(m)

    @property
    def mask_bounds(self):
        return self.parent.mask_bounds

    @property
    def shape(self):
        return self.parent.shape

    def get_subvolume(self, bounds):
        # Bounds are given in this view's local coordinates; translate each
        # coordinate component to parent coordinates before delegating.
        def to_parent(a):
            return self.local_to_parent(a) if a is not None else None

        parent_bounds = SubvolumeBounds(start=to_parent(bounds.start),
                                        stop=to_parent(bounds.stop),
                                        seed=to_parent(bounds.seed),
                                        label_id=bounds.label_id,
                                        label_margin=bounds.label_margin)
        return self.parent.get_subvolume(parent_bounds)
class PartitionedVolume(VolumeView):
    """Wrap an existing volume for partitioned access.

    Subvolume accesses to this volume will be offset and clipped to a partition
    of the wrapped volume.

    Parameters
    ----------
    parent : Volume
        The volume to wrap.
    partitioning : iterable of int
        Number of partitions along each axis. Only one axis should be greater
        than 1.
    partition_index : iterable of int
        Index of the partition which this volume will represent.
    """
    def __init__(self, parent, partitioning, partition_index):
        super(PartitionedVolume, self).__init__(
                parent,
                parent.resolution,
                image_data=parent.image_data,
                label_data=parent.label_data,
                mask_data=parent.mask_data)
        self.partitioning = np.asarray(partitioning)
        self.partition_index = np.asarray(partition_index)
        # Lower/upper corners of this partition in parent coordinates.
        partition_shape = np.floor_divide(np.array(self.parent.shape), self.partitioning)
        lower = np.multiply(partition_shape, self.partition_index).astype(np.int64)
        upper = np.multiply(partition_shape, self.partition_index + 1).astype(np.int64)
        self.bounds = (lower, upper)

    def local_to_parent(self, a):
        return a + self.bounds[0]

    def parent_to_local(self, a):
        return a - self.bounds[0]

    @property
    def mask_bounds(self):
        parent_bounds = self.parent.mask_bounds
        if parent_bounds is None:
            return None
        # Intersect the parent's mask AABB with this partition.
        return (np.maximum(parent_bounds[0], self.bounds[0]),
                np.minimum(parent_bounds[1], self.bounds[1]))

    @property
    def shape(self):
        return tuple(self.bounds[1] - self.bounds[0])
class DownsampledVolume(VolumeView):
    """Wrap an existing volume for downsampled access.

    Subvolume accesses to this volume will be downsampled, but continue to use
    the wrapped volume and its data at the original resolution.

    Parameters
    ----------
    parent : Volume
        The volume to wrap.
    downsample : iterable of int
        Integral zoom levels to downsample the wrapped volume.
    """
    def __init__(self, parent, downsample):
        # Per-axis integer scale factor (2 ** zoom level).
        self.scale = np.exp2(downsample).astype(np.int64)
        super(DownsampledVolume, self).__init__(
                parent,
                np.multiply(parent.resolution, self.scale),
                image_data=parent.image_data,
                label_data=parent.label_data,
                mask_data=parent.mask_data)

    def local_to_parent(self, a):
        return np.multiply(a, self.scale)

    def parent_to_local(self, a):
        return np.floor_divide(a, self.scale)

    @property
    def shape(self):
        return tuple(np.floor_divide(np.array(self.parent.shape), self.scale))

    def get_subvolume(self, bounds):
        """Fetch the full-resolution region and reduce it by block averaging.

        Bounds are given in downsampled (local) coordinates.
        """
        subvol_shape = bounds.stop - bounds.start
        label_shape = subvol_shape - 2 * bounds.label_margin
        parent_bounds = SubvolumeBounds(self.local_to_parent(bounds.start),
                                        self.local_to_parent(bounds.stop),
                                        label_margin=self.local_to_parent(bounds.label_margin))
        subvol = self.parent.get_subvolume(parent_bounds)
        # Reduce each (scale[0] x scale[1] x scale[2]) cell to one voxel by
        # averaging: reshape to interleave cell axes, then mean them away.
        subvol.image = subvol.image.reshape(
                [subvol_shape[0], self.scale[0],
                 subvol_shape[1], self.scale[1],
                 subvol_shape[2], self.scale[2]]).mean(5).mean(3).mean(1)

        if subvol.label_mask is not None:
            # Downsample body mask by considering blocks where the majority
            # of voxels are in the body to be in the body. Alternatives are:
            # - Conjunction (tends to introduce false splits)
            # - Disjunction (tends to overdilate and merge)
            # - Mode label (computationally expensive)
            if CONFIG.volume.label_downsampling == 'conjunction':
                subvol.label_mask = subvol.label_mask.reshape(
                        [label_shape[0], self.scale[0],
                         label_shape[1], self.scale[1],
                         label_shape[2], self.scale[2]]).all(5).all(3).all(1)
            else:
                subvol.label_mask = subvol.label_mask.reshape(
                        [label_shape[0], self.scale[0],
                         label_shape[1], self.scale[1],
                         label_shape[2], self.scale[2]]).mean(5).mean(3).mean(1) > 0.5

        # Note that this is not a coordinate xform to parent in the typical
        # sense, just a rescaling of the coordinate in the subvolume-local
        # coordinates. Hence no similar call in VolumeView.get_subvolume.
        subvol.seed = self.parent_to_local(subvol.seed)

        return subvol
class SparseWrappedVolume(VolumeView):
    """Wrap an existing volume for memory-cached, block-sparse access.

    Image and label data are served from octrees whose leaves are populated
    lazily from the parent volume on first access.
    """
    def __init__(self, parent, image_leaf_shape=None, label_leaf_shape=None):
        # Default leaf blocks to the model's input FOV shape.
        if image_leaf_shape is None:
            image_leaf_shape = list(CONFIG.model.input_fov_shape)
        if label_leaf_shape is None:
            label_leaf_shape = list(CONFIG.model.input_fov_shape)

        image_data = OctreeVolume(image_leaf_shape,
                                  (np.zeros(3), parent.image_data.shape),
                                  parent.image_data.dtype,
                                  populator=self.image_populator)
        label_data = OctreeVolume(label_leaf_shape,
                                  (np.zeros(3), parent.label_data.shape),
                                  parent.label_data.dtype,
                                  populator=self.label_populator)

        super(SparseWrappedVolume, self).__init__(
                parent,
                parent.resolution,
                image_data=image_data,
                label_data=label_data)

    def image_populator(self, bounds):
        # Fetch a leaf block of raw image data from the parent volume.
        start, stop = bounds
        return self.parent.image_data[start[0]:stop[0],
                                      start[1]:stop[1],
                                      start[2]:stop[2]]

    def label_populator(self, bounds):
        # Fetch a leaf block of label data from the parent volume.
        start, stop = bounds
        return self.parent.label_data[start[0]:stop[0],
                                      start[1]:stop[1],
                                      start[2]:stop[2]]
class HDF5Volume(Volume):
"""A volume backed by data views to HDF5 file arrays.
Parameters
----------
orig_file : str
Filename of the HDF5 file to load.
image_dataset : str
Full dataset path including groups to the raw image data array.
label_dataset : str
Full dataset path including groups to the object label data array.
"""
@staticmethod
def from_toml(filename):
from keras.utils.data_utils import get_file
volumes = {}
with open(filename, 'rb') as fin:
datasets = toml.load(fin).get('dataset', [])
for dataset in datasets:
hdf5_file = dataset['hdf5_file']
if dataset.get('use_keras_cache', False):
hdf5_file = get_file(hdf5_file, dataset['download_url'], md5_hash=dataset.get('download_md5', None))
image_dataset = dataset.get('image_dataset', None)
label_dataset = dataset.get('label_dataset', None)
mask_dataset = dataset.get('mask_dataset', None)
mask_bounds = dataset.get('mask_bounds', None)
resolution = dataset.get('resolution', None)
hdf5_pathed_file = os.path.join(os.path.dirname(filename), hdf5_file)
volume = HDF5Volume(hdf5_pathed_file,
image_dataset,
label_dataset,
mask_dataset,
mask_bounds=mask_bounds)
# If the volume configuration specifies an explicit resolution,
# override any provided in the HDF5 itself.
if resolution:
logging.info('Overriding resolution for volume "%s"', dataset['name'])
volume.resolution = np.array(resolution)
volumes[dataset['name']] = volume
return volumes
@staticmethod
def write_file(filename, resolution, **kwargs):
h5file = h5py.File(filename, 'w')
config = {'hdf5_file': os.path.basename(filename)}
channels = ['image', 'label', 'mask']
default_datasets = {
'image': 'volumes/raw',
'label': 'volumes/labels/neuron_ids',
'mask': 'volumes/labels/mask',
}
for channel in channels:
data = kwargs.get('{}_data'.format(channel), None)
dataset_name = kwargs.get('{}_dataset'.format(channel), default_datasets[channel])
if data is not None:
dataset = h5file.create_dataset(dataset_name, data=data, dtype=data.dtype)
dataset.attrs['resolution'] = resolution
config['{}_dataset'.format(channel)] = dataset_name
h5file.close()
return config
def __init__(self, orig_file, image_dataset, label_dataset, mask_dataset, mask_bounds=None):
logging.debug('Loading HDF5 file "{}"'.format(orig_file))
self.file = h5py.File(orig_file, 'r')
self.resolution = None
self._mask_bounds = tuple(map(np.asarray, mask_bounds)) if mask_bounds is not None else None
if image_dataset is None and label_dataset is None:
raise ValueError('HDF5 volume must have either an image or label dataset: {}'.format(orig_file))
if image_dataset is not None:
self.image_data = self.file[image_dataset]
if 'resolution' in self.file[image_dataset].attrs:
self.resolution = np.array(self.file[image_dataset].attrs['resolution'])
if label_dataset is not None:
self.label_data = self.file[label_dataset]
if 'resolution' in self.file[label_dataset].attrs:
resolution = np.array(self.file[label_dataset].attrs['resolution'])
if self.resolution is not None and not np.array_equal(self.resolution, resolution):
logging.warning('HDF5 image and label dataset resolutions differ in %s: %s, %s',
orig_file, self.resolution, resolution)
else:
self.resolution = resolution
else:
self.label_data = None
if mask_dataset is not None:
self.mask_data = self.file[mask_dataset]
else:
self.mask_data = None
if image_dataset is None:
self.image_data = np.full_like(self.label_data, np.NaN, dtype=np.float32)
if self.resolution is None:
self.resolution = np.ones(3)
def to_memory_volume(self):
data = ['image_data', 'label_data', 'mask_data']
data = {
k: self.world_mat_to_local(getattr(self, k)[:])
for k in data if getattr(self, k) is not None}
return NdarrayVolume(self.world_coord_to_local(self.resolution), **data)
class ImageStackVolume(Volume):
    """A volume for block sparse access to image pyramids over HTTP.

    Coordinate Systems
    ----------
    Real: Physical coordinates, generally measured in nanometers
    World: pixel coordinates, starts at (0,0,0) and accounts for pixel resolution
        often (4x4x40) nanometers per pixel
    Local: Downsampled pixel space

    Parameters
    ----------
    bounds : iterable of int
        Shape of the stack at zoom level 0 in pixels.
    orig_resolution : iterable of float
        Resolution of the stack at zoom level 0 in nm.
    translation : iterable of float
        Translational offset in nm between real and world coordinates.
    tile_width, tile_height : int
        Size of tiles in pixels
    tile_format_url : str
        Format string for building tile URLs from tile parameters.
    zoom_level : int, optional
        Zoom level to use for this volume.
    missing_z : iterable of int, optional
        Voxel z-indices where data is not available.
    image_leaf_shape : tuple of int or ndarray, optional
        Shape of image octree leaves in voxels. Defaults to 10 stacked tiles.
    """

    @staticmethod
    def from_catmaid_stack(stack_info, tile_source_parameters):
        """Construct a volume from CATMAID stack info and tile source parameters.

        See https://catmaid.readthedocs.io/en/stable/tile_sources.html for the
        tile source type numbering.
        """
        format_url = {
            1: '{source_base_url}{{z}}/{{row}}_{{col}}_{{zoom_level}}.{file_extension}',
            4: '{source_base_url}{{z}}/{{zoom_level}}/{{row}}_{{col}}.{file_extension}',
            5: '{source_base_url}{{zoom_level}}/{{z}}/{{row}}/{{col}}.{file_extension}',
            7: '{source_base_url}largeDataTileSource/{tile_width}/{tile_height}/'
               '{{zoom_level}}/{{z}}/{{row}}/{{col}}.{file_extension}',
            9: '{source_base_url}{{z}}/{{row}}_{{col}}_{{zoom_level}}.{file_extension}',
        }[tile_source_parameters['tile_source_type']].format(**tile_source_parameters)
        # CATMAID reports x, y, z; this codebase orders axes z, y, x.
        bounds = np.flipud(np.array(stack_info['bounds'], dtype=np.int64))
        resolution = np.flipud(np.array(stack_info['resolution']))
        translation = np.flipud(np.array(stack_info['translation']))
        tile_width = int(tile_source_parameters['tile_width'])
        tile_height = int(tile_source_parameters['tile_height'])
        return ImageStackVolume(bounds, resolution, translation, tile_width, tile_height,
                                format_url, missing_z=stack_info.get("broken_slices", None))

    @staticmethod
    def from_toml(filename):
        """Build a dict of named ImageStackVolumes from a TOML configuration file.

        Fixes: the original comprehensions were written as
        ``{si[key]: dataset[key] for key in si}``, which indexes a list with a
        string key and raised TypeError for every configuration. The method was
        also missing the ``@staticmethod`` decorator its siblings carry, and
        required the optional 'broken_slices' key to be present.
        """
        volumes = {}
        with open(filename, "rb") as fin:
            datasets = toml.load(fin).get("ImageStack", [])
            for dataset in datasets:
                # stack info keys ('broken_slices' is optional; see
                # from_catmaid_stack, which defaults it to None).
                si = [
                    "bounds",
                    "resolution",
                    "translation",
                    "broken_slices",
                ]
                # tile stack parameter keys (all required by the URL format)
                tsp = [
                    "source_base_url",
                    "file_extension",
                    "tile_width",
                    "tile_height",
                    "tile_source_type",
                ]
                volume = ImageStackVolume.from_catmaid_stack(
                    {key: dataset[key] for key in si if key in dataset},
                    {key: dataset[key] for key in tsp},
                )
                volumes[dataset["title"]] = volume
        return volumes

    def __init__(self, bounds, orig_resolution, translation, tile_width, tile_height,
                 tile_format_url, zoom_level=0, missing_z=None, image_leaf_shape=None):
        self.orig_bounds = bounds
        self.orig_resolution = orig_resolution
        self.translation = translation
        self.tile_width = tile_width
        self.tile_height = tile_height
        self.tile_format_url = tile_format_url
        self.mask_data = None
        self.zoom_level = int(zoom_level)
        if missing_z is None:
            missing_z = []
        self.missing_z = frozenset(missing_z)
        if image_leaf_shape is None:
            # Default leaf: a stack of 10 whole tiles.
            image_leaf_shape = [10, tile_height, tile_width]
        # Zoom coarsens x and y by 2**zoom_level; z is never downsampled.
        self.scale = np.exp2(np.array([0, self.zoom_level, self.zoom_level])).astype(np.int64)
        data_shape = (np.zeros(3), np.divide(bounds, self.scale).astype(np.int64))
        self.image_data = OctreeVolume(image_leaf_shape,
                                       data_shape,
                                       'float32',
                                       populator=self.image_populator)
        self.label_data = None

    def local_coord_to_world(self, a):
        return np.multiply(a, self.scale)

    def world_coord_to_local(self, a):
        return np.floor_divide(a, self.scale)

    def real_coord_to_world(self, a):
        return np.floor_divide(a - self.translation, self.orig_resolution)

    def world_coord_to_real(self, a):
        return np.multiply(a, self.orig_resolution) + self.translation

    @property
    def resolution(self):
        # Effective resolution at this zoom level (x/y coarsened, z unchanged).
        return self.orig_resolution * np.exp2([0, self.zoom_level, self.zoom_level])

    def downsample(self, resolution):
        """Return a view of this volume downsampled to roughly ``resolution``.

        Whole powers of two in x/y are absorbed into the zoom level (served
        directly from the image pyramid); any remaining factor is handled by a
        DownsampledVolume wrapper.
        """
        downsample = self._get_downsample_from_resolution(resolution)
        zoom_level = np.min(downsample[[self.DIM.X, self.DIM.Y]])
        if zoom_level > 0:
            return ImageStackVolume(
                self.orig_bounds,
                self.orig_resolution,
                self.translation,
                self.tile_width,
                self.tile_height,
                self.tile_format_url,
                zoom_level=self.zoom_level + zoom_level,
                missing_z=self.missing_z,
                image_leaf_shape=self.image_data.leaf_shape).downsample(resolution)
        if np.all(np.equal(downsample, 0)):
            return self
        return DownsampledVolume(self, downsample)

    def subvolume_bounds_generator(self, sparse_margin=None, **kwargs):
        if sparse_margin is not None:
            if kwargs:
                raise ValueError('sparse_margin can not be combined with other arguments.')
            return self.SparseSubvolumeBoundsGenerator(self, sparse_margin)
        return super(ImageStackVolume, self).subvolume_bounds_generator(**kwargs)

    def get_subvolume(self, bounds):
        if bounds.start is None or bounds.stop is None:
            image_subvol = self.image_data
            label_subvol = self.label_data
        else:
            image_subvol = self.image_data[
                bounds.start[0]:bounds.stop[0],
                bounds.start[1]:bounds.stop[1],
                bounds.start[2]:bounds.stop[2]]
            label_subvol = None
        if np.issubdtype(image_subvol.dtype, np.integer):
            raise ValueError('Sparse volume access does not support image data coercion.')
        seed = bounds.seed
        if seed is None:
            seed = np.array(image_subvol.shape, dtype=np.int64) // 2
        return Subvolume(image_subvol, label_subvol, seed, bounds.label_id)

    def image_populator(self, bounds):
        """Fetch tiles over HTTP to fill the octree leaf spanning ``bounds``."""
        image_subvol = np.zeros(tuple(bounds[1] - bounds[0]), dtype=np.float32)
        col_range = list(map(int, (math.floor(bounds[0][self.DIM.X] / self.tile_width),
                                   math.ceil(bounds[1][self.DIM.X] / self.tile_width))))
        row_range = list(map(int, (math.floor(bounds[0][self.DIM.Y] / self.tile_height),
                                   math.ceil(bounds[1][self.DIM.Y] / self.tile_height))))
        tile_size = np.array([1, self.tile_height, self.tile_width]).astype(np.int64)
        # range (not xrange) keeps this runnable on both Python 2 and 3,
        # consistent with the six-based compatibility used elsewhere.
        for z in range(bounds[0][self.DIM.Z], bounds[1][self.DIM.Z]):
            if z in self.missing_z:
                # Known-broken slice: leave it black.
                image_subvol[int(z - bounds[0][self.DIM.Z]), :, :] = 0
                continue
            for r in range(*row_range):
                for c in range(*col_range):
                    url = self.tile_format_url.format(zoom_level=self.zoom_level, z=z, row=r, col=c)
                    try:
                        im = np.array(Image.open(requests.get(url, stream=True).raw))
                        # If the image is multichannel, throw our hands up and
                        # just use the first channel.
                        if im.ndim > 2:
                            im = im[:, :, 0].squeeze()
                        im = im / 256.0
                    except IOError:
                        logging.debug('Failed to load tile: %s', url)
                        im = np.full((self.tile_height, self.tile_width), 0, dtype=np.float32)
                    tile_coord = np.array([z, r, c]).astype(np.int64)
                    tile_loc = np.multiply(tile_coord, tile_size)
                    # Clip the tile to the portion overlapping this leaf, and
                    # vice versa, before copying.
                    subvol = (np.maximum(np.zeros(3), tile_loc - bounds[0]).astype(np.int64),
                              np.minimum(np.array(image_subvol.shape),
                                         tile_loc + tile_size - bounds[0]).astype(np.int64))
                    tile_sub = (np.maximum(np.zeros(3), bounds[0] - tile_loc).astype(np.int64),
                                np.minimum(tile_size, bounds[1] - tile_loc).astype(np.int64))
                    image_subvol[subvol[0][self.DIM.Z],
                                 subvol[0][self.DIM.Y]:subvol[1][self.DIM.Y],
                                 subvol[0][self.DIM.X]:subvol[1][self.DIM.X]] = \
                        im[tile_sub[0][self.DIM.Y]:tile_sub[1][self.DIM.Y],
                           tile_sub[0][self.DIM.X]:tile_sub[1][self.DIM.X]]
        return image_subvol

    class SparseSubvolumeBoundsGenerator(six.Iterator):
        """Yield random seed-only bounds at least ``margin`` voxels from the faces."""

        def __init__(self, volume, margin):
            self.volume = volume
            self.margin = np.asarray(margin).astype(np.int64)
            self.ctr_min = self.margin
            self.ctr_max = (np.array(self.volume.shape) - self.margin - 1).astype(np.int64)
            self.random = np.random.RandomState(CONFIG.random_seed)

        @property
        def shape(self):
            return self.volume.shape

        def __iter__(self):
            return self

        def reset(self):
            self.random.seed(0)

        def __next__(self):
            ctr = np.array([self.random.randint(self.ctr_min[n], self.ctr_max[n])
                            for n in range(3)]).astype(np.int64)
            return SubvolumeBounds(seed=ctr)
class N5Volume(Volume):
    """A Volume for using an N5 filesystem for image retrieval.

    Parameters
    ----------
    root_path : string
        /absolute/path/to/data.n5
    datasets : dict of dicts (dataset name to dataset config)
        possible keys: ("mask", "label", "image")
        values: {"path": path, "dtype": dtype, "read_only": read_only}
    bounds : iterable of int, optional
        Shape of the stack at zoom level 0 in pixels.
        Necessary if the volume is missing an attributes file.
    resolution : iterable of float
        Resolution of the pixels at zoom level 0 in nm.
    translation : iterable of float
        Translational offset in nm s.t. for given coordinate
        a in pixel space, a*resolution+translation = b where
        b is in the desired nm coordinates
    """

    @staticmethod
    def from_toml(filename):
        """Build a dict of named N5Volumes from the 'N5Volume' tables of a TOML file.

        Fixes: 'translation' was read with ``volume_config.get["translation", None]``,
        which indexes the bound method and raised TypeError; it now calls
        ``.get(...)`` like the other optional keys.
        """
        volumes = {}
        with open(filename, "rb") as fin:
            volume_configs = toml.load(fin).get("N5Volume", [])
            for volume_config in volume_configs:
                root_path = volume_config["root_path"]
                datasets = volume_config["datasets"]
                resolution = volume_config.get("resolution", None)
                translation = volume_config.get("translation", None)
                bounds = volume_config.get("bounds", None)
                volume = N5Volume(
                    root_path,
                    datasets,
                    bounds,
                    resolution,
                    translation,
                )
                volumes[volume_config["title"]] = volume
        return volumes

    def __init__(
            self,
            root_path,
            datasets,
            bounds=None,
            resolution=None,
            translation=None,
    ):
        # Map N5 dtype attribute strings to numpy dtypes.
        self._dtype_map = {
            "UINT8": np.uint8,
            "UINT16": np.uint16,
            "UINT32": np.uint32,
            "UINT64": np.uint64,
            "INT8": np.int8,
            "INT16": np.int16,
            "INT32": np.int32,
            "INT64": np.int64,
            "FLOAT32": np.float32,
            "FLOAT64": np.float64,
        }
        self.bounds = bounds
        self.resolution = resolution
        self.translation = translation
        # No pyramid support: scale is fixed at 1 on every axis.
        self.scale = np.exp2(np.array([0, 0, 0])).astype(np.int64)
        self.data_shape = (np.array([0, 0, 0]), self.bounds / self.scale)
        # Initialization of data sources done in setter methods
        self.root_path = root_path
        self.image_config = datasets.get("image", None)
        self.mask_config = datasets.get("mask", None)
        self.label_config = datasets.get("label", None)

    @property
    def dtype_map(self):
        return self._dtype_map

    def local_coord_to_world(self, a):
        return np.multiply(a, self.scale)

    def world_coord_to_local(self, a):
        return np.floor_divide(a, self.scale)

    def real_coord_to_world(self, a):
        # Fixed: previously referenced self.orig_resolution, an attribute
        # N5Volume never defines (AttributeError at first use).
        return np.floor_divide(a - self.translation, self.resolution)

    def world_coord_to_real(self, a):
        # Fixed: same self.orig_resolution -> self.resolution correction.
        return np.multiply(a, self.resolution) + self.translation

    @property
    def octree_leaf_shape(self):
        return np.array([10, 10, 10])

    @property
    def image_config(self):
        return self._image_config

    @image_config.setter
    def image_config(self, dataset):
        # Setting a config (re)builds the lazily populated octree cache.
        self._image_config = dataset
        if dataset is not None:
            self._image_data = OctreeVolume(
                self.octree_leaf_shape,
                self.data_shape,
                self.dtype_map[dataset.get("dtype", "FLOAT32")],
                populator=self.image_populator,
            )
        else:
            self._image_data = None

    @property
    def image_data(self):
        return self._image_data

    @property
    def mask_config(self):
        return self._mask_config

    @mask_config.setter
    def mask_config(self, dataset):
        self._mask_config = dataset
        if dataset is not None:
            self._mask_data = OctreeVolume(
                self.octree_leaf_shape,
                self.data_shape,
                self.dtype_map[dataset.get("dtype", "FLOAT32")],
                populator=self.mask_populator,
            )
        else:
            self._mask_data = None

    @property
    def mask_data(self):
        return self._mask_data

    @property
    def label_config(self):
        return self._label_config

    @label_config.setter
    def label_config(self, dataset):
        self._label_config = dataset
        if dataset is not None:
            self._label_data = OctreeVolume(
                self.octree_leaf_shape,
                self.data_shape,
                self.dtype_map[dataset.get("dtype", "FLOAT32")],
                populator=self.label_populator,
            )
        else:
            self._label_data = None

    @property
    def label_data(self):
        return self._label_data

    @property
    def image_n5(self):
        """
        Create a new pyn5.Dataset every time you ask for image_n5.
        This is necessary to accomodate parallel reads since multiple
        threads can't use the same reader.
        """
        if self.image_config is not None:
            return pyn5.open(
                self.root_path,
                self.image_config.get("path"),
                self.image_config.get("dtype", "UINT8"),
                self.image_config.get("read_only", True),
            )
        else:
            return None

    def image_populator(self, bounds):
        return pyn5.read(self.image_n5, (bounds[0], bounds[1]))

    @property
    def mask_n5(self):
        # Fresh reader per access; see image_n5 for rationale.
        if self.mask_config is not None:
            return pyn5.open(
                self.root_path,
                self.mask_config.get("path"),
                self.mask_config.get("dtype", "UINT8"),
                self.mask_config.get("read_only", True),
            )
        else:
            return None

    def mask_populator(self, bounds):
        return pyn5.read(self.mask_n5, (bounds[0], bounds[1]))

    @property
    def label_n5(self):
        # Fresh reader per access; see image_n5 for rationale.
        if self.label_config is not None:
            return pyn5.open(
                self.root_path,
                self.label_config.get("path"),
                self.label_config.get("dtype", "UINT8"),
                self.label_config.get("read_only", True),
            )
        else:
            return None

    def label_populator(self, bounds):
        # Consistent with image/mask populators: pass an explicit
        # (start, stop) pair to pyn5.read.
        return pyn5.read(self.label_n5, (bounds[0], bounds[1]))
| aschampion/diluvian | diluvian/volumes.py | Python | mit | 61,524 | [
"Gaussian"
] | eaf095a20b764c4a4ca39a7e191ccd6e98bfc1269b8b5c4e9bc25b75e9185f6e |
#!/usr/bin/env python
import __future__
import gtk
from gettext import gettext as _
import os.path
import numpy as np
import sys
from ase.gui.widgets import pack, Help
from ase.data.colors import jmol_colors
from ase.atoms import Atoms
class Execute(gtk.Window):
    """ The Execute class provides an expert-user window for modification
    and evaluation of system properties with a simple one-line command structure.
    There are two types of commands, one set only applies to the global image and
    one set applies to all atoms. If the command line contains any of the atom
    commands, then it is executed separately for all atoms and for all images.
    Otherwise it is executed only once per image.
    Please do not mix global and atom commands."""
    # Help text shown by the Help button; <c>...</c> markup and literal \t
    # escapes are rendered by the ase.gui Help widget.
    terminal_help_txt=_("""
    Global commands work on all frames or only on the current frame
    - Assignment of a global variable may not reference a local one
    - use 'Current frame' switch to switch off application to all frames
    <c>e</c>:\t\ttotal energy of one frame
    <c>fmax</c>:\tmaximal force in one frame
    <c>A</c>:\tunit cell
    <c>E</c>:\t\ttotal energy array of all frames
    <c>F</c>:\t\tall forces in one frame
    <c>M</c>:\tall magnetic moments
    <c>R</c>:\t\tall atomic positions
    <c>S</c>:\tall selected atoms (boolean array)
    <c>D</c>:\tall dynamic atoms (boolean array)
    examples: <c>frame = 1</c>, <c>A[0][1] += 4</c>, <c>e-E[-1]</c>
    Atom commands work on each atom (or a selection) individually
    - these can use global commands on the RHS of an equation
    - use 'selected atoms only' to restrict application of command
    <c>x,y,z</c>:\tatomic coordinates
    <c>r,g,b</c>:\tatom display color, range is [0..1]
    <c>rad</c>:\tatomic radius for display
    <c>s</c>:\t\tatom is selected
    <c>d</c>:\t\tatom is movable
    <c>f</c>:\t\tforce
    <c>Z</c>:\tatomic number
    <c>m</c>:\tmagnetic moment
    examples: <c>x -= A[0][0], s = z > 5, Z = 6</c>
    Special commands and objects:
    <c>sa,cf</c>:\t(un)restrict to selected atoms/current frame
    <c>frame</c>:\tframe number
    <c>center</c>:\tcenters the system in its existing unit cell
    <c>del S</c>:\tdelete selection
    <c>CM</c>:\tcenter of mass
    <c>ans[-i]</c>:\tith last calculated result
    <c>exec file</c>: executes commands listed in file
    <c>cov[Z]</c>:(read only): covalent radius of atomic number Z
    <c>gui</c>:\tadvanced: ag window python object
    <c>img</c>:\tadvanced: ag images object
    """)
    def __init__(self, gui):
        """Build the expert-mode window: output view, command entry, toggles and buttons."""
        gtk.Window.__init__(self)
        self.gui = gui
        self.set_title(_('Expert user mode'))
        vbox = gtk.VBox()
        vbox.set_border_width(5)
        self.sw = gtk.ScrolledWindow()
        self.sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self.textview = gtk.TextView()
        self.textbuffer = self.textview.get_buffer()
        self.textview.set_editable(False)
        self.textview.set_cursor_visible(False)
        self.sw.add(self.textview)
        pack(vbox, self.sw, expand=True, padding = 5)
        self.sw.set_size_request(540, 150)
        self.textview.show()
        self.add_text(_('Welcome to the ASE Expert user mode'))
        self.cmd = gtk.Entry(60)
        self.cmd.connect('activate', self.execute)
        self.cmd.connect('key-press-event', self.update_command_buffer)
        pack(vbox, [gtk.Label('>>>'),self.cmd])
        # Command history is persisted on the gui object across window openings.
        self.cmd_buffer = getattr(gui,'expert_mode_buffer',[''])
        self.cmd_position = len(self.cmd_buffer)-1
        self.selected = gtk.CheckButton(_('Only selected atoms (sa) '))
        self.selected.connect('toggled',self.selected_changed)
        self.images_only = gtk.CheckButton(_('Only current frame (cf) '))
        self.images_only.connect('toggled',self.images_changed)
        pack(vbox, [self.selected, self.images_only])
        save_button = gtk.Button(stock=gtk.STOCK_SAVE)
        save_button.connect('clicked',self.save_output)
        help_button = gtk.Button(stock=gtk.STOCK_HELP)
        help_button.connect('clicked',self.terminal_help,"")
        stop_button = gtk.Button(stock=gtk.STOCK_STOP)
        stop_button.connect('clicked',self.stop_execution)
        self.stop = False
        pack(vbox, [gtk.Label(_('Global: Use A, D, E, M, N, R, S, n, frame;'
                                ' Atoms: Use a, f, m, s, x, y, z, Z ')),
                    stop_button, help_button, save_button], end = True)
        self.add(vbox)
        vbox.show()
        self.show()
        # set color mode to manual when opening this window for rgb manipulation
        self.colors = self.gui.get_colors()
        rgb_data = self.gui.get_colors(rgb = True)
        self.rgb_data = [] # ensure proper format of rgb_data
        for i, rgb in enumerate(rgb_data):
            self.rgb_data += [[i, rgb]]
        self.gui.colordata = self.rgb_data
        self.gui.colors = list(self.colors)
        self.gui.colormode = 'manual'
        self.cmd.grab_focus()
    def execute(self, widget=None, cmd = None):
        """Parse and run one command line.

        Decides whether the command is global or per-atom, handles the special
        commands (scripts, 'del S', 'sa', 'cf', 'center', 'CM', 'exec'), and
        otherwise eval/execs the line per frame (and per atom if index based).
        """
        global_commands = ['A','Col','D','e','E','F','frame','M','n','N','R','S'] # explicitly 'implemented' commands for use on whole system or entire single frame
        index_commands = ['a','b','d','f','g','m','r','rad','s','x','y','z','Z'] # commands for use on all (possibly selected) atoms
        new = self.gui.drawing_area.window.new_gc
        alloc = self.gui.colormap.alloc_color
        self.stop = False
        if cmd is None:
            # Interactive invocation: read from the entry widget and record history.
            cmd = self.cmd.get_text().strip()
            if len(cmd) == 0:
                return
            self.add_text('>>> '+cmd)
            self.cmd_buffer[-1] = cmd
            self.cmd_buffer += ['']
            setattr(self.gui,'expert_mode_buffer', self.cmd_buffer)
            self.cmd_position = len(self.cmd_buffer)-1
            self.cmd.set_text('')
        else:
            # Scripted invocation (run_script): echo the command differently.
            self.add_text('--> '+cmd)
        gui = self.gui
        img = gui.images
        frame = gui.frame
        N = img.nimages
        n = img.natoms
        S = img.selected
        D = img.dynamic[:, np.newaxis]
        E = img.E
        if self.selected.get_active():
            indices = np.where(S)[0]
        else:
            indices = range(n)
        ans = getattr(gui,'expert_mode_answers',[])
        loop_images = range(N)
        if self.images_only.get_active():
            loop_images = [self.gui.frame]
        # split off the first valid command in cmd to determine whether
        # it is global or index based, this includes things such as 4*z and z*4
        index_based = False
        first_command = cmd.split()[0]
        special = ['=',',','+','-','/','*',';','.','[',']','(',')',
                   '{','}','0','1','2','3','4','5','6','7','8','9']
        while first_command[0] in special and len(first_command)>1:
            first_command = first_command[1:]
        for c in special:
            if c in first_command:
                first_command = first_command[:first_command.find(c)]
        for c in index_commands:
            if c == first_command:
                index_based = True
        name = os.path.expanduser('~/.ase/'+cmd)
        # check various special commands:
        if os.path.exists(name): # run script from default directory
            self.run_script(name)
        elif cmd == 'del S': # delete selection
            gui.delete_selected_atoms()
        elif cmd == 'sa': # selected atoms only
            self.selected.set_active(not self.selected.get_active())
        elif cmd == 'cf': # current frame only
            self.images_only.set_active(not self.images_only.get_active())
        elif cmd == 'center': # center system
            img.center()
        elif cmd == 'CM': # calculate center of mass
            for i in loop_images:
                if self.stop:
                    break
                atoms = Atoms(positions=img.P[i][indices],
                              numbers=img.Z[indices])
                self.add_text(repr(atoms.get_center_of_mass()))
                ans += [atoms.get_center_of_mass()]
        elif first_command == 'exec': # execute script
            name = cmd.split()[1]
            if '~' in name:
                name = os.path.expanduser(name)
            if os.path.exists(name):
                self.run_script(name)
            else:
                self.add_text(_('*** WARNING: file does not exist - %s') % name)
        else:
            # General command: compile once, then eval (expression) or
            # exec (statement) per frame / per atom.
            code = compile(cmd + '\n', 'execute.py', 'single',
                           __future__.CO_FUTURE_DIVISION)
            if index_based and len(indices) == 0 and self.selected.get_active():
                self.add_text(_("*** WARNING: No atoms selected to work with"))
            for i in loop_images:
                if self.stop:
                    break
                # Expose per-frame variables (R, A, F, e, M, Col, cov, fmax)
                # to the eval/exec'ed command.
                R = img.P[i][indices]
                A = img.A[i]
                F = img.F[i][indices]
                e = img.E[i]
                M = img.M[i][indices]
                Col = []
                cov = img.covalent_radii
                for j in indices:
                    Col += [gui.colordata[j]]
                if len(indices) > 0:
                    fmax = max(((F * D[indices])**2).sum(1)**.5)
                else:
                    fmax = None
                frame = gui.frame
                if not index_based:
                    # Global command: try as expression first, fall back to statement.
                    try:
                        self.add_text(repr(eval(cmd)))
                        ans += [eval(cmd)]
                    except:
                        exec code
                    gui.set_frame(frame)
                    if gui.movie_window is not None:
                        gui.movie_window.frame_number.value = frame
                    # Write possibly modified per-frame arrays back to the images.
                    img.selected = S
                    img.A[i] = A
                    img.P[i][indices] = R
                    img.M[i][indices] = M
                else:
                    # Per-atom command: expose scalar variables for each atom in turn.
                    for n,a in enumerate(indices):
                        if self.stop:
                            break
                        x, y, z = R[n]
                        r, g, b = Col[n][1]
                        d = D[a]
                        f = np.vdot(F[n]*d,F[n]*d)**0.5
                        s = S[a]
                        Z = img.Z[a]
                        Zold = Z
                        m = M[n]
                        rad = img.r[a]
                        try:
                            self.add_text(repr(eval(cmd)))
                            ans += [eval(cmd)]
                        except:
                            exec code
                        # Write the (possibly reassigned) scalars back to the atom.
                        S[a] = s
                        img.P[i][a] = x, y, z
                        img.Z[a] = Z
                        img.r[a] = rad
                        img.dynamic[a] = d
                        if Z != Zold:
                            # Element changed: reset radius and display color.
                            img.r[a] = cov[Z] * 0.89
                            r,g,b = jmol_colors[Z]
                            gui.colordata[a] = [a,[r,g,b]]
                        color = tuple([int(65535*x) for x in [r,g,b]])
                        gui.colors[a] = new(alloc(*color))
                        img.M[i][a] = m
        setattr(self.gui,'expert_mode_answers', ans)
        gui.set_frame(frame,init=True)
    def add_text(self,val):
        """Append a line to the output view and scroll to the bottom."""
        text_end = self.textbuffer.get_end_iter()
        self.textbuffer.insert(text_end,val+'\n');
        if self.sw.get_vscrollbar() is not None:
            scroll = self.sw.get_vscrollbar().get_adjustment()
            scroll.set_value(scroll.get_upper())
    def selected_changed(self, *args):
        """Report the new state of the 'selected atoms only' toggle."""
        if self.selected.get_active():
            self.add_text(_('*** Only working on selected atoms'))
        else:
            self.add_text(_('*** Working on all atoms'))
    def images_changed(self, *args):
        """Report the new state of the 'current frame only' toggle."""
        if self.images_only.get_active():
            self.add_text(_('*** Only working on current image'))
        else:
            self.add_text(_('*** Working on all images'))
    def update_command_buffer(self, entry, event, *args):
        """Navigate the command history with the Up/Down arrow keys."""
        arrow = {gtk.keysyms.Up: -1, gtk.keysyms.Down: 1}.get(event.keyval, None)
        if arrow is not None:
            self.cmd_position += arrow
            self.cmd_position = max(self.cmd_position,0)
            self.cmd_position = min(self.cmd_position,len(self.cmd_buffer)-1)
            cmd = self.cmd_buffer[self.cmd_position]
            self.cmd.set_text(cmd)
            return True
        else:
            return False
    def save_output(self, *args):
        """Prompt for a filename and save the output view's text to it."""
        chooser = gtk.FileChooserDialog(
            _('Save Terminal text ...'), None, gtk.FILE_CHOOSER_ACTION_SAVE,
            (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
             gtk.STOCK_SAVE, gtk.RESPONSE_OK))
        save = chooser.run()
        if save == gtk.RESPONSE_OK or save == gtk.RESPONSE_SAVE:
            filename = chooser.get_filename()
            text = self.textbuffer.get_text(self.textbuffer.get_start_iter(),
                                            self.textbuffer.get_end_iter())
            fd = open(filename,'w')
            fd.write(text)
            fd.close()
        chooser.destroy()
    def run_script(self, name):
        """Execute commands from a file, one per line; '#' starts a comment."""
        commands = open(name,'r').readlines()
        for c_parse in commands:
            c = c_parse.strip()
            if '#' in c:
                c = c[:c.find('#')].strip()
            if len(c) > 0:
                self.execute(cmd = c.strip())
    def terminal_help(self,*args):
        """Open the help window with the command reference text."""
        Help(self.terminal_help_txt)
    def stop_execution(self, *args):
        """Request that the currently running command loop stop."""
        self.stop = True
    # Alias so the 'python' command name maps to the execute method.
    python = execute
| JConwayAWT/PGSS14CC | lib/python/multimetallics/ase/gui/execute.py | Python | gpl-2.0 | 13,683 | [
"ASE"
] | 1cdb0a1b48221db79efc5553363447fb8bf8fffb907625b114c4b8f094faeceb |
# Annotate glimmer-predicted ORFs with their BLASTX hits against an NCBI
# database, writing one tab-separated line per accepted hit.
# NOTE(review): Python 2 script (print statements). Input files are never
# closed, and the output handle 'intergenic' is never closed/flushed
# explicitly — relies on interpreter exit; confirm this is acceptable.
inp=open('blastx_glimmer_ncbi_tce1_n3_b80.xls', 'r')
inp2=inp.read()
# Each BLAST report section starts with a '# Query:' header line.
spl=inp2.split('# Query:')
headers=open('headers_ncbi_DB.txt','r').read()
intergenic=open('blastx_glimmer_annotated.xls','w') # not actually intergenic, but that's what the variable is called ;)
orfs=open('genes_glimmer_TCE1.fasta','r').read()
########################## ORFs — these do not follow the order well
orfs=orfs.split('>')
print len(orfs)
##################### headers
headers= headers.split('>')
######################### BLAST hits
# NOTE(review): spl[0] is the text before the first '# Query:' marker while
# orfs[0] is the text before the first '>' — the loop below assumes spl[a]
# and orfs[a] stay in lockstep; confirm the BLAST report order matches the
# FASTA file, otherwise annotations are shifted by one.
for a in range(0, len(spl)): # for each BLAST entry (whose numbering is off)
    positive='no'
    if '# 0 hits found' not in spl[a]:
        tabs=spl[a].split('\t')
        if len(tabs) > 12:
            tabs=tabs[:12]
        elif len(tabs)>10:
            ref=tabs[1] # ref is the BLAST hit id
            evalue=float(tabs[10])
            qstart=tabs[6]
            qend=tabs[7]
            lenght=int(tabs[3])  # NOTE(review): 'lenght' is a typo for 'length' (alignment length)
            # Accept hits with alignment length > 1 and e-value below 10.
            if lenght > 1 and evalue < 10:
                for h in headers:
                    if ref in h:
                        positive='si'
                        intergenic.write(h[:-1]+'\t'+str(qstart)+'\t'+str(qend)+'\t'+str(evalue)+'\t'+orfs[a][1:orfs[a].find('\n')]+'\n')
                        #break  # NOTE(review): commented out — every header containing ref is written, not just the first; confirm intent
                if positive=='no': # if the sequence is not among the headers
                    intergenic.write(str(tabs[1])+'\t'+str(qstart)+'\t'+str(qend)+'\t'+str(evalue)+'\t'+orfs[a][1:orfs[a].find('\n')]+'\n')
print 'fini'
| elecabfer/BLAST- | blastx_ncbi2.py | Python | mit | 1,469 | [
"BLAST"
] | 68af1a0acaf465a98b6cda7865fd2a2375c57bb890f2458cdf4ad7e4c71b3fd2 |
"""
Functions used to save model data and to perform analysis
"""
import numpy as np
from parameters import *
from sklearn import svm
import time
import pickle
import stimulus
import os
import copy
import matplotlib.pyplot as plt
from itertools import product
from scipy import signal
from scipy.optimize import curve_fit
# Directory containing the pickled model files to analyze.
data_dir = './savedir/'
# Index groups used to subdivide hidden units for analysis: even-indexed
# excitatory, odd-indexed excitatory, even-indexed inhibitory, odd-indexed
# inhibitory, and finally all hidden units.
# NOTE(review): assumes excitatory units occupy indices [0, num_exc_units)
# with inhibitory units following — confirm against parameters.py.
neuron_groups = []
neuron_groups.append(range(0,par['num_exc_units'],2))
neuron_groups.append(range(1,par['num_exc_units'],2))
neuron_groups.append(range(par['num_exc_units'],par['num_exc_units']+par['num_inh_units'],2))
neuron_groups.append(range(par['num_exc_units']+1,par['num_exc_units']+par['num_inh_units'],2))
neuron_groups.append(range(par['n_hidden']))
def run_multiple():
    """Run the full analysis suite over each saved model file.

    Iterates over every (task, run-index) combination, rebuilding the
    pickle filename used at training time, and re-analyzes it in place
    (results are saved back to the same file).
    """
    analysis_opts = {
        'decode_stability': False,
        'decoding_reps': 100,
        'simulation_reps': 100,
        'analyze_tuning': True,
        'calculate_resp_matrix': True,
        'suppress_analysis': False,
        'decode_test': False,
        'decode_rule': False,
        'decode_match': False,
        'svm_normalize': True}
    for task, run in product(['DMS'], range(20)):
        model_fn = '{}{}{}.pkl'.format(data_dir, task, run)
        print('Analyzing ', model_fn)
        analyze_model_from_file(model_fn, savefile=model_fn, update_params=analysis_opts)
def analyze_model_from_file(filename, savefile = None, update_params = {}):
    """ Load a trained model, re-run the network on several task conditions,
    perform the enabled analyses (decoding, tuning, response matrix,
    shuffling simulations) and pickle the combined results to savefile.

    filename      -- pickle produced by training; must contain 'parameters'
                     and 'weights' entries
    savefile      -- output pickle path (save_fn defaults to 'test.pkl')
    update_params -- parameter overrides applied on top of the stored ones

    NOTE: the mutable default update_params={} is shared across calls; it is
    only read here, never mutated, so this is currently harmless. """
    results = pickle.load(open(filename, 'rb'))
    if savefile is None:
        results['parameters']['save_fn'] = 'test.pkl'
    else:
        results['parameters']['save_fn'] = savefile
    # Restore the parameter set the model was trained with, then apply the
    # caller's overrides.  This pairing is repeated after every temporary
    # parameter change below to restore the original configuration.
    update_parameters(results['parameters'])
    update_parameters(update_params)
    stim = stimulus.Stimulus()
    # generate trials with match probability at 50%
    trial_info = stim.generate_trial(test_mode = True)
    # Convert the (time, batch, input) tensor into a list of per-time-step inputs.
    input_data = np.squeeze(np.split(trial_info['neural_input'], par['num_time_steps'], axis=0))
    h_init = results['weights']['h']
    y, h, syn_x, syn_u = run_model(input_data, h_init, \
        results['parameters']['syn_x_init'], results['parameters']['syn_u_init'], results['weights'])
    # generate trials with random sample and test stimuli, used for decoding
    trial_info_decode = stim.generate_trial(test_mode = True)
    input_data = np.squeeze(np.split(trial_info_decode['neural_input'], par['num_time_steps'], axis=0))
    _, h_decode, syn_x_decode, syn_u_decode = run_model(input_data, h_init, \
        results['parameters']['syn_x_init'], results['parameters']['syn_u_init'], results['weights'])
    # generate trials using DMS task, only used for measuring how neuronal and synaptic representations evolve in
    # a standardized way, used for figure correlating persistent activity and manipulation
    update_parameters({'trial_type': 'DMS'})
    trial_info_dms = stim.generate_trial(test_mode = True)
    input_data = np.squeeze(np.split(trial_info_dms['neural_input'], trial_info_dms['neural_input'].shape[0], axis=0))
    _, h_dms, syn_x_dms, syn_u_dms = run_model(input_data, h_init, \
        results['parameters']['syn_x_init'], results['parameters']['syn_u_init'], results['weights'])
    update_parameters(results['parameters']) # reset trial type to original value
    update_parameters(update_params)
    """ The next section performs various analysis """
    # calculate task accuracy, overall and broken down per rule
    results['task_accuracy'],_,_ = get_perf(trial_info['desired_output'], y, trial_info['train_mask'])
    results['task_accuracy_per_rule'] = []
    for r in np.unique(trial_info['rule']):
        ind = np.where(trial_info['rule'] == r)[0]
        acc, _, _ = get_perf(trial_info['desired_output'][:,ind,:], y[:,ind,:], trial_info['train_mask'][:, ind])
        results['task_accuracy_per_rule'].append(acc)
    print('Task accuracy',results['task_accuracy'])
    if par['calculate_resp_matrix']:
        # Mean test-period responses with and without single-neuron suppression.
        print('calculate response matrix...')
        resp_matrix_results = calculate_response_matrix(trial_info_decode, results['weights'])
        for key, val in resp_matrix_results.items():
            # Constant (zero-variance) arrays carry no information; skip them
            # to keep the result file small.  Same filter is applied below.
            if np.var(val) > 0:
                results[key] = val
    # Decode the sample direction from neuronal activity and synaptic efficacies using support vector machines
    trial_time = np.arange(0,h.shape[0]*par['dt'], par['dt'])
    trial_time_dms = np.arange(0,h_dms.shape[0]*par['dt'], par['dt'])
    if par['decoding_reps'] > 0:
        print('decoding activity...')
        decoding_results = calculate_svms(h_decode, syn_x_decode, syn_u_decode, trial_info_decode, trial_time, \
            num_reps = par['decoding_reps'], num_reps_stability = 10, decode_test = par['decode_test'], \
            decode_rule = par['decode_rule'], decode_match = par['decode_match'])
        for key, val in decoding_results.items():
            if np.var(val) > 0:
                results[key] = val
        # DMS-like tasks can reuse their own decoding as the standardized
        # '_dms' results; other tasks are re-decoded on the DMS trials.
        if par['trial_type'] in ['DMS', 'DMC', 'DMRS90', 'DMRS90ccw', 'DMRS45', 'DMRS180', 'location_DMS']:
            for key, val in decoding_results.items():
                if np.var(val) > 0:
                    results[key + '_dms'] = val
        else:
            # Calculate decoding for a DMS trial, used to correlate persistent activity and manipulation
            update_parameters({'trial_type': 'DMS'})
            decoding_results = calculate_svms(h_dms, syn_x_dms, syn_u_dms, trial_info_dms, trial_time_dms, \
                num_reps = par['decoding_reps'], num_reps_stability = 0, decode_test = par['decode_test'], decode_rule = par['decode_rule'])
            for key, val in decoding_results.items():
                if np.var(val) > 0:
                    results[key + '_dms'] = val
            update_parameters(results['parameters'])
            update_parameters(update_params)
    # Calculate neuronal and synaptic sample motion tuning
    if par['analyze_tuning']:
        print('calculate tuning...')
        tuning_results = calculate_tuning(h_decode, syn_x_decode, syn_u_decode, \
            trial_info_decode, trial_time, results['weights'], calculate_test = par['decode_test'])
        for key, val in tuning_results.items():
            if np.var(val) > 0:
                results[key] = val
        # Calculate tuning for a DMS trial, used to correlate persistent activity and manipulation
        if par['trial_type'] in ['DMS', 'DMC', 'DMRS90', 'DMRS90ccw','DMRS45', 'DMRS180', 'location_DMS']:
            for key, val in tuning_results.items():
                if np.var(val) > 0:
                    results[key + '_dms'] = val
        else:
            update_parameters({'trial_type': 'DMS'})
            tuning_results = calculate_tuning(h_dms, syn_x_dms, syn_u_dms, \
                trial_info_dms, trial_time_dms, results['weights'], calculate_test = False)
            for key, val in tuning_results.items():
                if np.var(val) > 0:
                    results[key + '_dms'] = val
            update_parameters(results['parameters'])
            update_parameters(update_params)
    # Calculate mean sample traces: per-time-step mean activity for each sample direction
    results['h_sample_mean'] = np.zeros((results['parameters']['num_time_steps'], results['parameters']['n_hidden'], \
        results['parameters']['num_motion_dirs']), dtype = np.float32)
    for i in range(results['parameters']['num_motion_dirs']):
        ind = np.where(trial_info_decode['sample'] == i)[0]
        results['h_sample_mean'][:,:,i] = np.mean(h_decode[:,ind,:], axis = 1)
    # Calculate the neuronal and synaptic contributions towards solving the task
    if par['simulation_reps'] > 0:
        print('simulating network...')
        simulation_results = simulate_network(trial_info, h, syn_x, \
            syn_u, results['weights'], num_reps = par['simulation_reps'])
        for key, val in simulation_results.items():
            if np.var(val) > 0:
                results[key] = val
    # Save the analysis results
    pickle.dump(results, open(savefile, 'wb') )
    print('Analysis results saved in ', savefile)
def calculate_svms(h, syn_x, syn_u, trial_info, trial_time, num_reps = 20, num_reps_stability = 5, \
    decode_test = False, decode_rule = False, decode_match = False, decode_neuronal_groups = False):
    """ Calculates neuronal and synaptic decoding accuracies using support vector machines.

    h, syn_x, syn_u -- (time, trials, neurons) hidden activity and short-term
                       plasticity variables
    trial_info      -- trial dictionary from stimulus.generate_trial
    trial_time      -- time axis (ms); only its length is used here
    Returns a dict of decoding-accuracy arrays; which keys are present
    depends on the decode_* flags. """
    lin_clf = svm.SVC(C=1, kernel='linear', decision_function_shape='ovr', shrinking=False, tol=1e-3)
    num_time_steps = len(trial_time)
    decoding_results = {}
    # The synaptic efficacy is the product of syn_x and syn_u. Will decode sample
    # direction from this value
    syn_efficacy = syn_x*syn_u
    # Per-task extraction of the labels (sample, test, rule, match) to decode.
    if par['trial_type'] == 'DMC':
        # Will also calculate the category decoding accuracies, assuming the first half of
        # the sample direction belong to category 1, and the second half belong to category 2
        num_motion_dirs = len(np.unique(trial_info['sample']))
        sample = np.floor(trial_info['sample']/(num_motion_dirs/2)*np.ones_like(trial_info['sample']))
        test = np.floor(trial_info['test']/(num_motion_dirs/2)*np.ones_like(trial_info['sample']))
        rule = trial_info['rule']
        match = np.array(trial_info['match'])
    elif par['trial_type'] == 'dualDMS':
        # Two binary rule cues are combined into four rule labels.
        sample = trial_info['sample']
        rule = trial_info['rule'][:,0] + 2*trial_info['rule'][:,1]
        par['num_rules'] = 4
        match = np.array(trial_info['match'])
    elif par['trial_type'] == 'locationDMS':
        # NOTE(review): every other branch in this file spells this trial type
        # 'location_DMS', so this branch appears unreachable and location_DMS
        # trials fall through to the generic else below -- confirm the
        # intended trial-type string before changing it.
        sample = trial_info['sample'][:, 0]
        test = trial_info['test']
        rule = trial_info['rule']
        match = np.array(trial_info['match'])
    elif par['trial_type'] == 'ABBA' or par['trial_type'] == 'ABCA':
        # Only the first of the sequentially presented test stimuli is decoded.
        sample = trial_info['sample']
        rule = trial_info['rule']
        test = np.array(trial_info['test'][:,0])
        match = np.array(trial_info['match'][:,0])
    elif par['trial_type'] == 'DMS+DMC':
        # rule 0 is DMS, rule 1 is DMC
        ind_rule = np.where(trial_info['rule']==1)[0]
        num_motion_dirs = len(np.unique(trial_info['sample']))
        sample = np.array(trial_info['sample'])
        test = np.array(trial_info['test'])
        # change DMC sample motion directions into categories
        sample[ind_rule] = np.floor(trial_info['sample'][ind_rule]/(num_motion_dirs/2)*np.ones_like(trial_info['sample'][ind_rule]))
        test[ind_rule] = np.floor(trial_info['test'][ind_rule]/(num_motion_dirs/2)*np.ones_like(trial_info['sample'][ind_rule]))
        rule = trial_info['rule']
        match = np.array(trial_info['match'])
    else:
        # Generic case: decode raw sample/test directions.
        sample = np.array(trial_info['sample'])
        rule = np.array(trial_info['rule'])
        match = np.array(trial_info['match'])
        if trial_info['test'].ndim == 2:
            test = trial_info['test'][:,0]
        else:
            test = np.array(trial_info['test'])
    if len(np.unique(np.array(trial_info['rule']))) == 1 and decode_rule:
        print('Only one unique rule; setting decode rule to False')
        decode_rule = False
    # Sample decoding is always computed, together with (optional)
    # cross-temporal stability matrices.
    decoding_results['neuronal_sample_decoding'], decoding_results['synaptic_sample_decoding'], \
        decoding_results['neuronal_sample_decoding_stability'], decoding_results['synaptic_sample_decoding_stability'] = \
        svm_wraper(lin_clf, h, syn_efficacy, sample, rule, num_reps, num_reps_stability, trial_time)
    # Report mean decoding over the last 10 time steps of the delay period.
    to = (par['dead_time']+par['fix_time']+par['sample_time']+par['delay_time'])//par['dt']
    print('Neuronal and synaptic delay period decoding', \
        np.mean(decoding_results['neuronal_sample_decoding'][0,0,:,to-10:to]), \
        np.mean(decoding_results['synaptic_sample_decoding'][0,0,:,to-10:to]))
    if decode_test:
        decoding_results['neuronal_test_decoding'], decoding_results['synaptic_test_decoding'] ,_ ,_ = \
            svm_wraper(lin_clf, h, syn_efficacy, test, rule, num_reps, 0, trial_time)
    if decode_match:
        decoding_results['neuronal_match_decoding'], decoding_results['synaptic_match_decoding'] ,_ ,_ = \
            svm_wraper(lin_clf, h, syn_efficacy, match, rule, num_reps, 0, trial_time)
    if decode_rule:
        decoding_results['neuronal_rule_decoding'], decoding_results['synaptic_rule_decoding'] ,_ ,_ = \
            svm_wraper(lin_clf, h, syn_efficacy, trial_info['rule'], np.zeros_like(rule), num_reps, 0, trial_time)
    if decode_neuronal_groups:
        # Repeat sample/test/match decoding restricted to each neuron group.
        # NOTE(review): h and syn_efficacy are indexed on axis 0 here
        # (h[neuron_groups[i],:,:]) although axis 0 is time and axis 2 is
        # neurons elsewhere in this function -- confirm the intended axis
        # before relying on these group results.
        decoding_results['neuronal_sample_decoding_group'] = []
        decoding_results['synaptic_sample_decoding_group'] = []
        decoding_results['neuronal_test_decoding_group'] = []
        decoding_results['synaptic_test_decoding_group'] = []
        decoding_results['neuronal_match_decoding_group'] = []
        decoding_results['synaptic_match_decoding_group'] = []
        for i in range(4):
            neuronal_decoding, synaptic_decoding, _, _ = \
                svm_wraper(lin_clf, h[neuron_groups[i],:,:], syn_efficacy[neuron_groups[i],:,:], sample, rule, 20, 0, trial_time)
            decoding_results['neuronal_sample_decoding_group'].append(neuronal_decoding)
            decoding_results['synaptic_sample_decoding_group'].append(synaptic_decoding)
            neuronal_decoding, synaptic_decoding, _, _ = \
                svm_wraper(lin_clf, h[neuron_groups[i],:,:], syn_efficacy[neuron_groups[i],:,:], test, rule, 20, 0, trial_time)
            decoding_results['neuronal_test_decoding_group'].append(neuronal_decoding)
            decoding_results['synaptic_test_decoding_group'].append(synaptic_decoding)
            neuronal_decoding, synaptic_decoding, _, _ = \
                svm_wraper(lin_clf, h[neuron_groups[i],:,:], syn_efficacy[neuron_groups[i],:,:], match, rule, 20, 0, trial_time)
            decoding_results['neuronal_match_decoding_group'].append(neuronal_decoding)
            decoding_results['synaptic_match_decoding_group'].append(synaptic_decoding)
    return decoding_results
def svm_wraper_simple(lin_clf, h, syn_eff, stimulus, rule, num_reps, num_reps_stability, trial_time):
    """ Simplified decoding wrapper: random 75/25 train/test splits without
    balancing the number of trials per condition (compare svm_wraper).

    NOTE(review): the parameter name `stimulus` shadows the imported stimulus
    module inside this function (harmless here, but easy to trip over).
    NOTE(review): ind_rule is computed but never used, so the train/test
    split draws from all trials regardless of rule -- confirm intended.
    NOTE(review): z is indexed as z[:, t, ind] (axis 1 = time, axis 2 =
    trials) although h.shape is unpacked as (time, trials, neurons) above --
    confirm the expected axis order of the arrays passed in. """
    train_pct = 0.75
    num_time_steps, num_trials, _ = h.shape
    num_rules = len(np.unique(rule))
    # 4 refers to four data_type, normalized neural activity and synaptic efficacy, and unnormalized neural activity and synaptic efficacy
    score = np.zeros((4, num_rules, par['num_receptive_fields'], num_reps, num_time_steps), dtype = np.float32)
    score_dynamic = np.zeros((4, num_rules, par['num_receptive_fields'], num_reps_stability, num_time_steps, num_time_steps), dtype = np.float32)
    # number of reps used to calculate encoding stability should not be larger than number of normal decoding reps
    num_reps_stability = np.minimum(num_reps_stability, num_reps)
    for r in range(num_rules):
        ind_rule = np.where(rule==r)[0]
        for rf in range(par['num_receptive_fields']):
            # dualDMS carries one stimulus label per receptive field.
            if par['trial_type'] == 'dualDMS':
                labels = np.array(stimulus[:,rf])
            else:
                labels = np.array(stimulus)
            for rep in range(num_reps):
                # Fresh random train/test split on every repetition.
                q = np.random.permutation(num_trials)
                ind_train = q[:round(train_pct*num_trials)]
                ind_test = q[round(train_pct*num_trials):]
                # Only data_type 0 (normalized neural activity) is currently
                # evaluated; types 1-3 below are skipped, so those slices of
                # `score` / `score_dynamic` stay zero.
                for data_type in [0]:
                    if data_type == 0:
                        z = normalize_values(h, ind_train)
                    elif data_type == 1:
                        z = normalize_values(syn_eff, ind_train)
                    elif data_type == 2:
                        z = np.array(h)
                    elif data_type == 3:
                        z = np.array(syn_eff)
                    for t in range(num_time_steps):
                        lin_clf.fit(z[:,t,ind_train].T, labels[ind_train])
                        predicted_sample = lin_clf.predict(z[:,t,ind_test].T)
                        score[data_type, r, rf, rep, t] = np.mean( labels[ind_test]==predicted_sample)
                        # Cross-temporal stability: test the time-t classifier
                        # at every other time point t1.
                        if rep < num_reps_stability and par['decode_stability']:
                            for t1 in range(num_time_steps):
                                predicted_sample = lin_clf.predict(z[:,t1,ind_test].T)
                                score_dynamic[data_type, r, rf, rep, t, t1] = np.mean(labels[ind_test]==predicted_sample)
    return score, score_dynamic
def svm_wraper(lin_clf, h, syn_eff, conds, rule, num_reps, num_reps_stability, trial_time):
    """ Wrapper function used to decode sample/test or rule information
    from hidden activity (h) and synaptic efficacies (syn_eff).

    Unlike svm_wraper_simple, trials are balanced: every condition
    contributes the same number of (resampled-with-replacement) train and
    test trials per repetition.

    Returns (score_h, score_syn_eff, score_h_stability,
    score_syn_eff_stability); the stability matrices are only filled when
    par['decode_stability'] is set and rep < num_reps_stability. """
    train_pct = 0.75
    num_time_steps, num_trials, _ = h.shape
    num_rules = len(np.unique(rule))
    score_h = np.zeros((num_rules, par['num_receptive_fields'], num_reps, num_time_steps), dtype = np.float32)
    score_syn_eff = np.zeros((num_rules, par['num_receptive_fields'], num_reps, num_time_steps), dtype = np.float32)
    score_h_stability = np.zeros((num_rules, par['num_receptive_fields'], num_reps_stability, num_time_steps, num_time_steps), dtype = np.float32)
    score_syn_eff_stability = np.zeros((num_rules, par['num_receptive_fields'], num_reps_stability, num_time_steps, num_time_steps), dtype = np.float32)
    # number of reps used to calculate encoding stability should not be larger than number of normal decoding reps
    num_reps_stability = np.minimum(num_reps_stability, num_reps)
    for r in range(num_rules):
        ind_rule = np.where(rule==r)[0]
        for n in range(par['num_receptive_fields']):
            # dualDMS carries one condition label per receptive field.
            if par['trial_type'] == 'dualDMS':
                current_conds = np.array(conds[:,n])
            else:
                current_conds = np.array(conds)
            num_conds = len(np.unique(conds[ind_rule]))
            # Resample more trials per condition when there are few conditions.
            if num_conds <= 2:
                trials_per_cond = 100
            else:
                trials_per_cond = 25
            # uint16 trial indices: assumes fewer than 65536 trials.
            equal_train_ind = np.zeros((num_conds*trials_per_cond), dtype = np.uint16)
            equal_test_ind = np.zeros((num_conds*trials_per_cond), dtype = np.uint16)
            cond_ind = []
            for c in range(num_conds):
                cond_ind.append(ind_rule[np.where(current_conds[ind_rule] == c)[0]])
                if len(cond_ind[c]) < 4:
                    # NOTE(review): the plain-list fallback here cannot be
                    # fancy-indexed like an ndarray below (cond_ind[c][q[:i]]
                    # would raise TypeError) and uses trials 0-3 regardless of
                    # their true condition -- confirm this path is acceptable.
                    print('Not enough trials for this condition!')
                    print('Setting cond_ind to [0,1,2,3]')
                    cond_ind[c] = [0,1,2,3]
            for rep in range(num_reps):
                # Build balanced train/test index sets by splitting each
                # condition 75/25 and resampling each side with replacement.
                for c in range(num_conds):
                    u = range(c*trials_per_cond, (c+1)*trials_per_cond)
                    q = np.random.permutation(len(cond_ind[c]))
                    i = int(np.round(len(cond_ind[c])*train_pct))
                    train_ind = cond_ind[c][q[:i]]
                    test_ind = cond_ind[c][q[i:]]
                    q = np.random.randint(len(train_ind), size = trials_per_cond)
                    equal_train_ind[u] = train_ind[q]
                    q = np.random.randint(len(test_ind), size = trials_per_cond)
                    equal_test_ind[u] = test_ind[q]
                score_h[r,n,rep,:] = calc_svm(lin_clf, h, current_conds, current_conds, equal_train_ind, equal_test_ind)
                score_syn_eff[r,n,rep,:] = calc_svm(lin_clf, syn_eff, current_conds, current_conds, equal_train_ind, equal_test_ind)
                if par['decode_stability'] and rep < num_reps_stability:
                    score_h_stability[r,n,rep,:,:] = calc_svm_stability(lin_clf, h, current_conds, current_conds, equal_train_ind, equal_test_ind)
                    score_syn_eff_stability[r,n,rep,:,:] = calc_svm_stability(lin_clf, syn_eff, current_conds, current_conds, equal_train_ind, equal_test_ind)
    return score_h, score_syn_eff, score_h_stability, score_syn_eff_stability
def calc_svm_stability(lin_clf, y, train_conds, test_conds, train_ind, test_ind):
    """ Cross-temporal decoding: train the classifier on activity at each
    time step t and score it at every time step t1, producing a
    (time x time) stability matrix.

    lin_clf                 -- scikit-learn classifier with fit/predict
    y                       -- (time, trials, neurons) activity array
    train_conds, test_conds -- per-trial condition labels
    train_ind, test_ind     -- trial indices of the train / test split
    Returns a (num_time_steps, num_time_steps) accuracy matrix; rows/columns
    before the end of the dead time remain 0.

    Note: unlike calc_svm, the activity is deliberately NOT normalized here
    (a normalize_values call was previously present but commented out).
    """
    score = np.zeros((par['num_time_steps'], par['num_time_steps']))
    for t in range(par['dead_time']//par['dt'], par['num_time_steps']):
        lin_clf.fit(y[t,train_ind,:], train_conds[train_ind])
        for t1 in range(par['dead_time']//par['dt'], par['num_time_steps']):
            dec = lin_clf.predict(y[t1,test_ind,:])
            score[t, t1] = np.mean(test_conds[test_ind] == dec)
    return score
def calc_svm(lin_clf, y, train_conds, test_conds, train_ind, test_ind):
    """ Train the classifier on activity at each time step and score it on
    the held-out trials at the same time step.

    lin_clf                 -- scikit-learn classifier with fit/predict
    y                       -- (time, trials, neurons) activity array
    train_conds, test_conds -- per-trial condition labels
    train_ind, test_ind     -- trial indices of the train / test split
    Returns a length-par['num_time_steps'] accuracy vector; entries before
    the end of the dead time remain 0.
    """
    score = np.zeros((par['num_time_steps']))
    # Scale activity to [0,1] using statistics from the training trials only.
    y = normalize_values(y, train_ind)
    for t in range(par['dead_time']//par['dt'], par['num_time_steps']):
        lin_clf.fit(y[t,train_ind,:], train_conds[train_ind])
        dec = lin_clf.predict(y[t,test_ind,:])
        score[t] = np.mean(test_conds[test_ind]==dec)
    return score
def normalize_values(z, train_ind):
    """ Min-max normalize each (time step, neuron) trace to roughly [0, 1],
    using the minimum/maximum computed over the training trials only (so no
    scaling information leaks from the test trials).

    z         -- (time, trials, neurons) activity array
    train_ind -- trial indices used to compute the normalization statistics
    Returns a normalized copy; the caller's array is left untouched.  The
    previous implementation normalized in place, so repeated calls with
    different train indices (e.g. across decoding repetitions in
    svm_wraper_simple) compounded the normalization and corrupted the
    caller's activity arrays.  Returns z unchanged when normalization is
    disabled via par['svm_normalize'].
    """
    if not par['svm_normalize']:
        return z
    z = np.copy(z)  # never mutate the caller's array
    # normalize values between 0 and 1
    for t, n in product(range(z.shape[0]), range(z.shape[2])): # loop across time, neurons
        m1 = z[t,train_ind,n].min()
        m2 = z[t,train_ind,n].max()
        z[t,:,n] -= m1
        if m2 > m1:
            z[t,:,n] /= (m2-m1)
    return z
def calculate_response_matrix(trial_info, network_weights):
    """ Measure each hidden neuron's mean test-period response for every
    (sample, test) direction pair, both for the intact network and with each
    single neuron's activity suppressed from test onset onward.

    Returns a dict with 'resp_no_suppresion' (n_hidden x dirs x dirs) and
    'resp_suppresion' (suppressed neuron x n_hidden x dirs x dirs); the
    'suppresion' spelling is kept for compatibility with existing result
    files. """
    test_onset = (par['dead_time']+par['fix_time']+par['sample_time']+par['delay_time'])//par['dt']
    resp_matrix_results = {
        'resp_no_suppresion' : np.zeros((par['n_hidden'], par['num_motion_dirs'], par['num_motion_dirs']), dtype = np.float32),
        'resp_suppresion' : np.zeros((par['n_hidden'], par['n_hidden'], par['num_motion_dirs'], par['num_motion_dirs']), dtype = np.float32)}
    # Baseline: re-run the full network on the provided trials.
    x = np.split(trial_info['neural_input'],par['num_time_steps'],axis=0)
    _, h, _, _ = run_model(x, network_weights['h'], par['syn_x_init'], par['syn_u_init'], network_weights)
    resp_matrix_results['resp_no_suppresion'] = average_test_response(h, trial_info, test_onset)
    # Suppress one neuron at a time from test onset and record the effect on
    # every neuron's test-period response.
    for n in range(par['n_hidden']):
        suppress_activity = np.ones((par['num_time_steps'], par['n_hidden']))
        suppress_activity[test_onset:, n] = 0 # suppress activity starting from test onset
        suppress_activity = np.split(suppress_activity, par['num_time_steps'], axis=0)
        _, h,_,_ = run_model(x, network_weights['h'], par['syn_x_init'], par['syn_u_init'], \
            network_weights, suppress_activity = suppress_activity)
        resp_matrix_results['resp_suppresion'][n,:,:,:] = average_test_response(h, trial_info, test_onset)
    return resp_matrix_results
def average_test_response(h, trial_info, test_onset):
    """ Mean hidden activity during the test epoch, binned by (sample, test)
    direction pair.

    h          -- (time, trials, neurons) activity array
    trial_info -- trial dictionary with per-trial 'sample' and 'test' labels
    test_onset -- first time step of the test epoch
    Returns an array of shape (n_hidden, num_motion_dirs, num_motion_dirs).
    """
    # Average over the test epoch first, then bin trials by direction pair.
    mean_test_activity = np.mean(h[test_onset:, :, :], axis=0)
    resp = np.zeros((par['n_hidden'], par['num_motion_dirs'], par['num_motion_dirs']), dtype = np.float32)
    dirs = range(par['num_motion_dirs'])
    for samp_dir, test_dir in product(dirs, dirs):
        trials = np.where((trial_info['sample']==samp_dir)*(trial_info['test']==test_dir))[0]
        resp[:, samp_dir, test_dir] = np.mean(mean_test_activity[trials, :], axis = 0)
    return resp
def simulate_network(trial_info, h, syn_x, syn_u, network_weights, num_reps = 20):
    """ Quantify how much task performance relies on neuronal activity versus
    synaptic efficacies: re-run the network from each test onset after
    shuffling the hidden state or the synaptic state across trials, both
    globally (num_reps repetitions) and restricted to each neuron group
    (num_grp_reps repetitions).

    NOTE(review): test_onset is only assigned for the trial types handled
    below; any other trial type raises NameError at num_test_periods. """
    epsilon = 1e-3
    # Simulation will start from the start of the test period until the end of trial
    if par['trial_type'] == 'dualDMS':
        test_onset = [(par['dead_time']+par['fix_time']+par['sample_time']+2*par['delay_time']+par['test_time'])//par['dt']]
    elif par['trial_type'] in ['ABBA','ABCA']:
        # range(1,2): only the first test pulse is simulated
        test_onset = [(par['dead_time']+par['fix_time']+par['sample_time']+i*par['ABBA_delay'])//par['dt'] for i in range(1,2)]
    elif par['trial_type'] in ['DMS', 'DMC', 'DMRS90', 'DMRS90ccw']:
        # Two simulation start points: end of sample epoch and end of delay.
        test_onset = []
        test_onset.append((par['dead_time']+par['fix_time']+par['sample_time'])//par['dt'])
        test_onset.append((par['dead_time']+par['fix_time']+par['sample_time']+par['delay_time'])//par['dt'])
    num_test_periods = len(test_onset)
    # 200 ms window immediately before the first test onset, used by the
    # (currently disabled) suppression analysis at the bottom.
    suppression_time_range = [range(test_onset[0]-200//par['dt'], test_onset[0])]
    syn_efficacy = syn_x*syn_u
    test = np.array(trial_info['test'])
    sample = np.array(trial_info['sample'])
    # Keep only the first test stimulus when several are present.
    if test.ndim == 2:
        test = test[:, 0]
    elif test.ndim == 3:
        test = test[:, 0, 0]
    # Regression design matrix [1, cos, sin] of the test direction, used to
    # estimate synaptic test tuning via least squares.
    test_dir = np.ones((len(test), 3))
    test_dir[:,1] = np.cos(2*np.pi*test/par['num_motion_dirs'])
    # NOTE(review): integer division '//' here, while the cosine above uses
    # '/': the sine regressor is likely wrong -- confirm before reuse.
    test_dir[:,2] = np.sin(2*np.pi*test//par['num_motion_dirs'])
    trial_length, batch_train_size, n_hidden = h.shape
    num_grp_reps = 5
    # NOTE(review): the keys 'synaptic_pev_test_shuffled' and
    # 'synaptic_pref_dir_test_shuffled' written in the ABBA/ABCA branches
    # below are never allocated here, so those branches raise KeyError --
    # possibly the '_suppression' keys (or new entries) were intended.
    simulation_results = {
        'simulation_accuracy'           : np.zeros((par['num_rules'], num_test_periods, num_reps)),
        'accuracy_neural_shuffled'      : np.zeros((par['num_rules'], num_test_periods, num_reps)),
        'accuracy_syn_shuffled'         : np.zeros((par['num_rules'], num_test_periods, num_reps)),
        'accuracy_suppression'          : np.zeros((par['num_rules'], len(suppression_time_range), len(neuron_groups), num_grp_reps)),
        'accuracy_neural_shuffled_grp'  : np.zeros((par['num_rules'], num_test_periods, len(neuron_groups), num_grp_reps)),
        'accuracy_syn_shuffled_grp'     : np.zeros((par['num_rules'], num_test_periods, len(neuron_groups), num_grp_reps)),
        'synaptic_pev_test_suppression' : np.zeros((par['num_rules'], num_test_periods, len(neuron_groups), n_hidden, trial_length)),
        'synaptic_pref_dir_test_suppression': np.zeros((par['num_rules'], num_test_periods, len(neuron_groups), n_hidden, trial_length))}
    mask = np.array(trial_info['train_mask'])
    if par['trial_type'] == 'ABBA' or par['trial_type'] == 'ABCA':
        # Only want to examine accuracy on the 2nd test pulse.
        t0 = (par['dead_time']+par['fix_time']+par['sample_time'] + 2*par['ABBA_delay'])//par['dt']
        mask[:t0,:] = 0
        t0 = (par['dead_time']+par['fix_time']+par['sample_time'] + 4*par['ABBA_delay'])//par['dt']
        mask[t0:,:] = 0
    for r, t in product(range(par['num_rules']), range(num_test_periods)):
        # Slice out this rule's trials from the test onset onward.
        test_length = trial_length - test_onset[t]
        trial_ind = np.where(trial_info['rule']==r)[0]
        train_mask = mask[test_onset[t]:,trial_ind]
        x = np.split(trial_info['neural_input'][test_onset[t]:,trial_ind, :],test_length,axis=0)
        desired_output = trial_info['desired_output'][test_onset[t]:,trial_ind, :]
        for n in range(num_reps):
            # Calculating behavioral accuracy without shuffling
            hidden_init = np.copy(h[test_onset[t]-1,trial_ind,:])
            syn_x_init = np.copy(syn_x[test_onset[t]-1,trial_ind,:])
            syn_u_init = np.copy(syn_u[test_onset[t]-1,trial_ind,:])
            y, _, _, _ = run_model(x, hidden_init, syn_x_init, syn_u_init, network_weights)
            simulation_results['simulation_accuracy'][r,t,n] ,_ ,_ = get_perf(desired_output, y, train_mask)
            # Keep the synaptic values fixed, permute the neural activity
            ind_shuffle = np.random.permutation(len(trial_ind))
            hidden_init = np.copy(hidden_init[ind_shuffle, :])
            y, _, _, _ = run_model(x, hidden_init, syn_x_init, syn_u_init, network_weights)
            simulation_results['accuracy_neural_shuffled'][r,t,n] ,_ ,_ = get_perf(desired_output, y, train_mask)
            # Keep the hidden values fixed, permute synaptic values
            hidden_init = np.copy(h[test_onset[t]-1,trial_ind, :])
            syn_x_init = np.copy(syn_x_init[ind_shuffle, :])
            syn_u_init = np.copy(syn_u_init[ind_shuffle, :])
            y, _, _, _ = run_model(x, hidden_init, syn_x_init, syn_u_init, network_weights)
            simulation_results['accuracy_syn_shuffled'][r,t,n] ,_ ,_ = get_perf(desired_output, y, train_mask)
        for n in range(num_grp_reps): # Neuron group shuffling
            for g in range(len(neuron_groups)):
                # reset everything
                hidden_init = np.copy(h[test_onset[t]-1,trial_ind,:])
                syn_x_init = np.copy(syn_x[test_onset[t]-1,trial_ind,:])
                syn_u_init = np.copy(syn_u[test_onset[t]-1,trial_ind,:])
                # shuffle neuronal activity (only within this neuron group)
                ind_shuffle = np.random.permutation(len(trial_ind))
                for neuron_num in neuron_groups[g]:
                    hidden_init[:, neuron_num] = hidden_init[ind_shuffle, neuron_num]
                y, _, syn_x_hist, syn_u_hist = run_model(x, hidden_init, syn_x_init, syn_u_init, network_weights)
                simulation_results['accuracy_neural_shuffled_grp'][r,t,g,n] ,_ ,_ \
                    = get_perf(desired_output, y, train_mask)
                if par['trial_type'] in ['ABBA','ABCA']:
                    # Regress synaptic efficacy onto the test direction to get
                    # per-neuron, per-time-step tuning strength (PEV) and
                    # preferred direction.
                    # NOTE(review): see the missing-keys note above -- this
                    # branch raises KeyError as written.
                    syn_efficacy = syn_x_hist*syn_u_hist
                    for hidden_num in range(par['n_hidden']):
                        for t1 in range(test_length):
                            weights = np.linalg.lstsq(test_dir[trial_ind,:], syn_efficacy[t1,trial_ind,hidden_num],rcond=None)
                            weights = np.reshape(weights[0],(3,1))
                            pred_err = syn_efficacy[t1,trial_ind,hidden_num] - np.dot(test_dir[trial_ind,:], weights).T
                            mse = np.mean(pred_err**2)
                            response_var = np.var(syn_efficacy[t1,trial_ind,hidden_num])
                            simulation_results['synaptic_pev_test_shuffled'][r,t,g,n, hidden_num,t1+test_onset[t]] = 1 - mse/(response_var + epsilon)
                            simulation_results['synaptic_pref_dir_test_shuffled'][r,t,g,n,hidden_num,t1+test_onset[t]] = np.arctan2(weights[2,0],weights[1,0])
                # reset neuronal activity, shuffle synaptic activity (within group)
                hidden_init = h[test_onset[t]-1,trial_ind,:]
                for neuron_num in neuron_groups[g]:
                    syn_x_init[:,neuron_num] = syn_x_init[ind_shuffle,neuron_num]
                    syn_u_init[:,neuron_num] = syn_u_init[ind_shuffle,neuron_num]
                y, _, _, _ = run_model(x, hidden_init, syn_x_init, syn_u_init, network_weights)
                simulation_results['accuracy_syn_shuffled_grp'][r,t,g,n] ,_ ,_ = get_perf(desired_output, y, train_mask)
                if par['trial_type'] in ['ABBA','ABCA']:
                    # NOTE(review): same missing-keys issue as above; also
                    # response_var here indexes syn_efficacy[hidden_num,t1,...]
                    # while the first block used [t1,trial_ind,hidden_num] --
                    # the axis order looks inconsistent, confirm.
                    syn_efficacy = syn_x_hist*syn_u_hist
                    for hidden_num in range(par['n_hidden']):
                        for t1 in range(test_length):
                            weights = np.linalg.lstsq(test_dir[trial_ind,:], syn_efficacy[t1,trial_ind,hidden_num],rcond=None)
                            weights = np.reshape(weights[0],(3,1))
                            pred_err = syn_efficacy[t1,trial_ind,hidden_num] - np.dot(test_dir[trial_ind,:], weights).T
                            mse = np.mean(pred_err**2)
                            response_var = np.var(syn_efficacy[hidden_num,t1,trial_ind])
                            simulation_results['synaptic_pev_test_shuffled'][r,t,g,n, hidden_num,t1+test_onset[t]] = 1 - mse/(response_var + epsilon)
                            simulation_results['synaptic_pref_dir_test_shuffled'][r,t,g,n,hidden_num,t1+test_onset[t]] = np.arctan2(weights[2,0],weights[1,0])
        if par['suppress_analysis'] and False:
            # NOTE(review): this branch is explicitly disabled ('and False').
            # It also contains code that cannot run as written, e.g. the
            # five-index syn_efficacy[t1,trial_ind,t1,trial_ind,hidden_num]
            # below and a 3-element assignment into a length-num_grp_reps
            # slice of 'accuracy_suppression'.  Left untouched since it is
            # unreachable.
            """
            if par['trial_type'] == 'ABBA' or par['trial_type'] == 'ABCA':
                test_onset_sup = (par['fix_time']+par['sample_time']+par['ABBA_delay'])//par['dt']
            elif par['trial_type'] == 'DMS' or par['trial_type'] == 'DMC' or \
                par['trial_type'] == 'DMRS90' or par['trial_type'] == 'DMRS180':
                test_onset_sup = (par['fix_time']+par['sample_time']+par['delay_time'])//par['dt']
            """
            x = np.split(trial_info['neural_input'][:,trial_ind,:],trial_length,axis=0)
            desired_output = trial_info['desired_output'][:,trial_ind,:]
            train_mask = np.copy(mask[:,trial_ind])
            """
            train_mask = trial_info['train_mask'][:,trial_ind]
            if par['trial_type'] == 'ABBA' or par['trial_type'] == 'ABCA':
                train_mask[test_onset_sup + par['ABBA_delay']//par['dt']:, :] = 0
            """
            syn_x_init = np.copy(syn_x[0,trial_ind,:])
            syn_u_init = np.copy(syn_u[0,trial_ind,:])
            hidden_init = np.copy(h[0,trial_ind,:])
            y, _, _, _ = run_model(x, hidden_init, syn_x_init, syn_u_init, network_weights)
            acc, acc_non_match, acc_match = get_perf(desired_output, y, train_mask)
            simulation_results['accuracy_no_suppression'] = np.array([acc, acc_non_match, acc_match])
            for k, k1 in product(range(len(suppression_time_range)), range(len(neuron_groups))):
                suppress_activity = np.ones((trial_length,par['n_hidden']))
                for m1, m2 in product(neuron_groups[k1], suppression_time_range[k]):
                    suppress_activity[m2,m1] = 0
                suppress_activity = np.split(suppress_activity, trial_length, axis=0)
                syn_x_init = np.array(syn_x[0,trial_ind,:])
                syn_u_init = np.array(syn_u[0,trial_ind,:])
                hidden_init = np.array(h[0,trial_ind,:])
                y, _, syn_x_sim, syn_u_sim = run_model(x, hidden_init, syn_x_init, \
                    syn_u_init, network_weights, suppress_activity = suppress_activity)
                acc, acc_non_match, acc_match = get_perf(desired_output, y, train_mask)
                simulation_results['accuracy_suppression'][r,k,k1,:] = np.array([acc, acc_non_match, acc_match])
                syn_efficacy = syn_x_sim*syn_u_sim
                for hidden_num in range(par['n_hidden']):
                    for t1 in range(syn_x_sim.shape[1]):
                        weights = np.linalg.lstsq(test_dir[trial_ind,:], syn_efficacy[t1,trial_ind,t1,trial_ind,hidden_num])
                        weights = np.reshape(weights[0],(3,1))
                        pred_err = syn_efficacy[t1,trial_ind,t1,trial_ind,hidden_num] - np.dot(test_dir[trial_ind,:], weights).T
                        mse = np.mean(pred_err**2)
                        response_var = np.var(syn_efficacy[hidden_num,t1,trial_ind])
                        simulation_results['synaptic_pev_test_suppression'][r,k,k1, hidden_num,t1] = 1 - mse/(response_var+1e-9)
                        simulation_results['synaptic_pref_dir_test_suppression'][r,k,k1,hidden_num,t1] = np.arctan2(weights[2,0],weights[1,0])
    return simulation_results
def calculate_tuning(h, syn_x, syn_u, trial_info, trial_time, network_weights, calculate_test = False):
    """ Calculates neuronal and synaptic sample motion direction tuning

    h, syn_x, syn_u -- activity / short-term-plasticity histories, indexed
        [time, trial, neuron] (inferred from indexing below -- TODO confirm)
    trial_info -- dict of trial metadata ('sample', 'test', 'rule',
        'train_mask', 'neural_input', 'desired_output', ...)
    trial_time -- sequence of time points; only its length is used
    network_weights -- weight dict forwarded to run_model for the
        suppression re-runs
    calculate_test -- if True, additionally regress activity/efficacy
        against the test stimulus direction

    Returns a dict of PEV, preferred-direction and tuning-curve arrays.
    Relies on module-level names: par, neuron_groups, run_model, get_perf,
    product, np.
    """
    epsilon = 1e-9
    num_test_stimuli = 1 # only analyze the first test stimulus
    mask = np.array(trial_info['train_mask'])
    # Task-dependent unpacking of sample/test/rule plus the time window used
    # for the activity-suppression analysis (a short window before test onset)
    if par['trial_type'] == 'dualDMS':
        sample = trial_info['sample']
        test = trial_info['test'][:,:,:num_test_stimuli]
        # two binary rule cues combined into a single rule index 0..3
        rule = trial_info['rule'][:,0] + 2*trial_info['rule'][:,1]
        par['num_rules'] = 4
        par['num_receptive_fields'] = 2
        test_onset = (par['dead_time']+par['fix_time']+par['sample_time']+par['delay_time'])//par['dt']
        suppression_time_range = [range(test_onset-50//par['dt'], test_onset)]
    elif par['trial_type'] == 'ABBA' or par['trial_type'] == 'ABCA':
        test = trial_info['test'][:,:num_test_stimuli]
        rule = np.array(trial_info['rule'])
        sample = np.reshape(np.array(trial_info['sample']),(par['batch_size'], 1))
        test_onset = (par['dead_time']+par['fix_time']+par['sample_time']+par['ABBA_delay'])//par['dt']
        suppression_time_range = [range(test_onset-200//par['dt'], test_onset)]
        # only want to examine accuracy on the 2nd pulse, so zero the mask
        # before and after that pulse
        t0 = (par['dead_time']+par['fix_time']+par['sample_time'] + 2*par['ABBA_delay'])//par['dt']
        mask[:t0,:] = 0
        t0 = (par['dead_time']+par['fix_time']+par['sample_time'] + 4*par['ABBA_delay'])//par['dt']
        mask[t0:,:] = 0
    elif par['trial_type'] == 'location_DMS':
        par['num_receptive_fields'] = 1
        test = np.reshape(np.array(trial_info['test']),(par['batch_size'], 1))
        test_onset = (par['dead_time']+par['fix_time']+par['sample_time']+par['delay_time'])//par['dt']
        sample = np.reshape(np.array(trial_info['sample']),(par['batch_size'], 1))
        rule = np.array(trial_info['rule'])
        match = np.array(trial_info['match'])
        suppression_time_range = [range(test_onset-200//par['dt'], test_onset)]
    else:
        # default DMS-style tasks
        rule = np.array(trial_info['rule'])
        match = np.array(trial_info['match'])
        sample = np.reshape(np.array(trial_info['sample']),(par['batch_size'], 1))
        test = np.reshape(np.array(trial_info['test']),(par['batch_size'], 1))
        test_onset = (par['dead_time']+par['fix_time']+par['sample_time']+par['delay_time'])//par['dt']
        suppression_time_range = [range(test_onset-50//par['dt'], test_onset)]
    num_time_steps = len(trial_time)
    # Pre-allocate all result arrays; neuron_groups is a module-level global
    tuning_results = {
        'neuronal_pref_dir' : np.zeros((par['n_hidden'], par['num_rules'], par['num_receptive_fields'], num_time_steps), dtype=np.float32),
        'synaptic_pref_dir' : np.zeros((par['n_hidden'], par['num_rules'], par['num_receptive_fields'], num_time_steps), dtype=np.float32),
        'neuronal_pev' : np.zeros((par['n_hidden'], par['num_rules'], par['num_receptive_fields'], num_time_steps), dtype=np.float32),
        'synaptic_pev' : np.zeros((par['n_hidden'], par['num_rules'], par['num_receptive_fields'], num_time_steps), dtype=np.float32),
        'neuronal_pev_test' : np.zeros((par['n_hidden'], par['num_rules'], par['num_receptive_fields'], num_time_steps), dtype=np.float32),
        'synaptic_pev_test' : np.zeros((par['n_hidden'], par['num_rules'], par['num_receptive_fields'], num_time_steps), dtype=np.float32),
        'neuronal_pev_match' : np.zeros((par['n_hidden'], par['num_rules'], par['num_receptive_fields'], num_time_steps), dtype=np.float32),
        'synaptic_pev_match' : np.zeros((par['n_hidden'], par['num_rules'], par['num_receptive_fields'], num_time_steps), dtype=np.float32),
        'neuronal_pref_dir_test': np.zeros((par['n_hidden'], par['num_rules'], par['num_receptive_fields'], num_time_steps), dtype=np.float32),
        'synaptic_pref_dir_test': np.zeros((par['n_hidden'], par['num_rules'], par['num_receptive_fields'], num_time_steps), dtype=np.float32),
        'acc_neuronal_suppression': np.zeros((par['num_rules'], len(suppression_time_range), len(neuron_groups), 3)),
        'neuronal_sample_tuning': np.zeros((par['n_hidden'], par['num_rules'], par['num_motion_dirs'], par['num_receptive_fields'], num_time_steps), dtype=np.float32),
        'synaptic_sample_tuning': np.zeros((par['n_hidden'], par['num_rules'], par['num_motion_dirs'], par['num_receptive_fields'], num_time_steps), dtype=np.float32),
        'synaptic_pev_test_suppression' : np.zeros((par['num_rules'], len(suppression_time_range), len(neuron_groups), par['n_hidden'], num_time_steps)),
        'synaptic_pref_dir_test_suppression': np.zeros((par['num_rules'], len(suppression_time_range), len(neuron_groups), par['n_hidden'], num_time_steps))}
    # synaptic efficacy = depression * facilitation variables
    syn_efficacy = syn_x*syn_u
    # Design matrices [1, cos(theta), sin(theta)] for circular-linear regression
    sample_dir = np.ones((par['batch_size'], 3, par['num_receptive_fields']))
    for rf in range(par['num_receptive_fields']):
        sample_dir[:,1, rf] = np.cos(2*np.pi*sample[:,rf]/par['num_motion_dirs'])
        sample_dir[:,2, rf] = np.sin(2*np.pi*sample[:,rf]/par['num_motion_dirs'])
    test_dir = np.ones((par['batch_size'], 3, par['num_receptive_fields']))
    for rf in range(par['num_receptive_fields']):
        test_dir[:,1, rf] = np.reshape(np.cos(2*np.pi*test[:, rf]/par['num_motion_dirs']), (par['batch_size']))
        test_dir[:,2, rf] = np.reshape(np.sin(2*np.pi*test[:, rf]/par['num_motion_dirs']), (par['batch_size']))
    for r in range(par['num_rules']):
        trial_ind = np.where((rule==r))[0]
        for n in range(par['n_hidden']):
            for t in range(num_time_steps):
                # Mean sample response
                for md in range(par['num_motion_dirs']):
                    for rf in range(par['num_receptive_fields']):
                        ind_motion_dir = np.where((rule==r)*(sample[:,rf]==md))[0]
                        tuning_results['neuronal_sample_tuning'][n,r,md,rf,t] = np.mean(h[t,ind_motion_dir,n])
                        tuning_results['synaptic_sample_tuning'][n,r,md,rf,t] = np.mean(syn_efficacy[t,ind_motion_dir,n])
                for rf in range(par['num_receptive_fields']):
                    # Neuronal sample tuning: least-squares fit of activity
                    # onto [1, cos, sin]; PEV = 1 - MSE/variance
                    weights = np.linalg.lstsq(sample_dir[trial_ind,:,rf], h[t,trial_ind,n], rcond=None)
                    weights = np.reshape(weights[0],(3,1))
                    pred_err = h[t,trial_ind,n] - np.dot(sample_dir[trial_ind,:,rf], weights).T
                    mse = np.mean(pred_err**2)
                    response_var = np.var(h[t,trial_ind,n])
                    if response_var > epsilon:
                        tuning_results['neuronal_pev'][n,r,rf,t] = 1 - mse/(response_var + epsilon)
                        # preferred direction from the sin/cos coefficients
                        tuning_results['neuronal_pref_dir'][n,r,rf,t] = np.arctan2(weights[2,0],weights[1,0])
                    if calculate_test:
                        # same regression against the test stimulus direction
                        weights = np.linalg.lstsq(test_dir[trial_ind,:,rf], h[t,trial_ind,n], rcond=None)
                        weights = np.reshape(weights[0],(3,1))
                        pred_err = h[t,trial_ind,n] - np.dot(test_dir[trial_ind,:,rf], weights).T
                        mse = np.mean(pred_err**2)
                        response_var = np.var(h[t,trial_ind,n])
                        if response_var > epsilon:
                            tuning_results['neuronal_pev_test'][n,r,rf,t] = 1 - mse/(response_var + epsilon)
                            tuning_results['neuronal_pref_dir_test'][n,r,rf,t] = np.arctan2(weights[2,0],weights[1,0])
                    # Synaptic sample tuning
                    weights = np.linalg.lstsq(sample_dir[trial_ind,:,rf], syn_efficacy[t,trial_ind,n], rcond=None)
                    weights = np.reshape(weights[0],(3,1))
                    pred_err = syn_efficacy[t,trial_ind,n] - np.dot(sample_dir[trial_ind,:,rf], weights).T
                    mse = np.mean(pred_err**2)
                    response_var = np.var(syn_efficacy[t,trial_ind,n])
                    tuning_results['synaptic_pev'][n,r,rf,t] = 1 - mse/(response_var + epsilon)
                    tuning_results['synaptic_pref_dir'][n,r,rf,t] = np.arctan2(weights[2,0],weights[1,0])
                    if calculate_test:
                        weights = np.linalg.lstsq(test_dir[trial_ind,:,rf], syn_efficacy[t,trial_ind,n], rcond=None)
                        weights = np.reshape(weights[0],(3,1))
                        pred_err = syn_efficacy[t,trial_ind,n] - np.dot(test_dir[trial_ind,:,rf], weights).T
                        mse = np.mean(pred_err**2)
                        response_var = np.var(syn_efficacy[t,trial_ind,n])
                        tuning_results['synaptic_pev_test'][n,r,rf,t] = 1 - mse/(response_var + epsilon)
                        tuning_results['synaptic_pref_dir_test'][n,r,rf,t] = np.arctan2(weights[2,0],weights[1,0])
        if par['suppress_analysis']:
            # Re-run this rule's trials with selected neuron groups silenced
            # during the pre-test window, and measure the accuracy cost
            x = np.split(trial_info['neural_input'][:,trial_ind,:],num_time_steps,axis=0)
            y = trial_info['desired_output'][:,trial_ind,:]
            train_mask = np.array(mask[:,trial_ind])
            syn_x_init = np.array(syn_x[0,trial_ind,:])
            syn_u_init = np.array(syn_u[0,trial_ind,:])
            hidden_init = np.array(h[0,trial_ind,:])
            # baseline accuracy with no suppression
            y_hat, _, _, _ = run_model(x, hidden_init, syn_x_init, syn_u_init, network_weights)
            acc, acc_non_match, acc_match = get_perf(y, y_hat, train_mask)
            tuning_results['accuracy_no_suppression'] = np.array([acc, acc_non_match, acc_match])
            for k in range(len(suppression_time_range)):
                for k1 in range(len(neuron_groups)):
                    # multiplicative gate: 0 for (time, neuron) pairs to silence
                    suppress_activity = np.ones((num_time_steps, par['n_hidden']))
                    for m1, m2 in product(neuron_groups[k1], suppression_time_range[k]):
                        suppress_activity[m2, m1] = 0
                    suppress_activity = np.split(suppress_activity, num_time_steps, axis=0)
                    y_hat, _, syn_x_sim, syn_u_sim = run_model(x, hidden_init, syn_x_init, \
                        syn_u_init, network_weights, suppress_activity = suppress_activity)
                    acc, acc_non_match, acc_match = get_perf(y, y_hat, train_mask)
                    tuning_results['acc_neuronal_suppression'][r,k,k1,:] = np.array([acc, acc_non_match, acc_match])
                    # NOTE(review): this rebinds the outer syn_efficacy; when
                    # num_rules > 1, the next rule's tuning loop reuses the
                    # suppressed-run values -- confirm this is intended
                    syn_efficacy = syn_x_sim*syn_u_sim
                    for hidden_num in range(par['n_hidden']):
                        for t1 in range(num_time_steps):
                            weights = np.linalg.lstsq(test_dir[trial_ind,:,0], syn_efficacy[t1,trial_ind,hidden_num], rcond=None)
                            weights = np.reshape(weights[0],(3,1))
                            pred_err = syn_efficacy[t1,trial_ind,hidden_num] - np.dot(test_dir[trial_ind,:,0], weights).T
                            mse = np.mean(pred_err**2)
                            response_var = np.var(syn_efficacy[t1,trial_ind,hidden_num])
                            tuning_results['synaptic_pev_test_suppression'][r,k,k1, hidden_num,t1] = 1 - mse/(response_var+1e-9)
                            tuning_results['synaptic_pref_dir_test_suppression'][r,k,k1,hidden_num,t1] = np.arctan2(weights[2,0],weights[1,0])
    return tuning_results
def run_model(x, h_init_org, syn_x_init_org, syn_u_init_org, weights, suppress_activity = None):
    """Simulate the RNN from the given initial state.

    x -- per-timestep list of network inputs
    h_init_org, syn_x_init_org, syn_u_init_org -- initial hidden state and
        short-term-plasticity variables (copied, never mutated)
    weights -- dict of network weights; constrained copies are used when
        par['EI'] is set
    suppress_activity -- optional per-timestep activity gates for lesioning
    Returns stacked arrays (y, h, syn_x, syn_u).
    """
    # Work on copies so the caller's initial-state arrays are never mutated.
    initial_h = copy.copy(h_init_org)
    initial_syn_x = copy.copy(syn_x_init_org)
    initial_syn_u = copy.copy(syn_u_init_org)
    # Shallow copy so the constrained weights don't overwrite the originals.
    network_weights = dict(weights)
    if par['EI']:
        # Apply the sign constraint: rectify weights and split excitatory /
        # inhibitory recurrent units via the EI matrix.
        network_weights['w_rnn'] = par['EI_matrix'] @ np.maximum(0, network_weights['w_rnn'])
        network_weights['w_in'] = np.maximum(0, network_weights['w_in'])
        network_weights['w_out'] = np.maximum(0, network_weights['w_out'])
    hidden_hist, syn_x_hist, syn_u_hist = \
        rnn_cell_loop(x, initial_h, initial_syn_x, initial_syn_u, network_weights, suppress_activity)
    # Network output: linear readout of each timestep's hidden state.
    outputs = []
    for h_t in hidden_hist:
        outputs.append(h_t @ network_weights['w_out'] + weights['b_out'])
    return np.stack(outputs), np.stack(hidden_hist), np.stack(syn_x_hist), np.stack(syn_u_hist)
def rnn_cell_loop(x_unstacked, h, syn_x, syn_u, weights, suppress_activity):
    """Unroll rnn_cell over all time steps.

    x_unstacked -- iterable of per-timestep inputs
    h, syn_x, syn_u -- state carried between steps
    weights -- weight dict passed through to rnn_cell
    suppress_activity -- per-timestep gates, or None for no suppression
    Returns lists of the per-step h, syn_x and syn_u states.
    """
    h_hist = []
    syn_x_hist = []
    syn_u_hist = []
    # Loop through the neural inputs to the RNN
    for t, rnn_input in enumerate(x_unstacked):
        # Gate of 1 means "no suppression" inside rnn_cell
        gate = suppress_activity[t] if suppress_activity is not None else 1
        h, syn_x, syn_u = rnn_cell(np.squeeze(rnn_input), h, syn_x, syn_u, weights, gate)
        # (removed leftover debug print that spammed stdout every timestep)
        h_hist.append(h)
        syn_x_hist.append(syn_x)
        syn_u_hist.append(syn_u)
    return h_hist, syn_x_hist, syn_u_hist
def rnn_cell(rnn_input, h, syn_x, syn_u, weights, suppress_activity):
    """One Euler step of the rate RNN with short-term synaptic plasticity.

    rnn_input -- external input for this time step
    h -- current hidden rates
    syn_x, syn_u -- synaptic depression / facilitation variables
    weights -- dict with 'w_in', 'w_rnn', 'b_rnn' (already sign-constrained
        by the caller when applicable)
    suppress_activity -- multiplicative gate on the new rates (1 = none)
    Returns the updated (h, syn_x_new, syn_u_new). Reads the module-level
    ``par`` dict for all time constants and noise levels.
    """
    # Update the synaptic plasticity parameters
    if par['synapse_config'] is not None:
        # implement both synaptic short term facilitation and depression
        syn_x_new = syn_x + (par['alpha_std']*(1-syn_x) - par['dt_sec']*syn_u*syn_x*h)*par['dynamic_synapse']
        syn_u_new = syn_u + (par['alpha_stf']*(par['U']-syn_u) + par['dt_sec']*par['U']*(1-syn_u)*h)*par['dynamic_synapse']
        # clamp both variables to their physical range [0, 1]
        syn_x_new = np.minimum(1, np.maximum(0, syn_x_new))
        syn_u_new = np.minimum(1, np.maximum(0, syn_u_new))
        # effective (plasticity-scaled) presynaptic activity
        h_post = syn_u_new*syn_x_new*h
    else:
        # no synaptic plasticity
        h_post = h
    # Update the hidden state: leaky integration, additive noise, then ReLU
    h = np.maximum(0, h*(1-par['alpha_neuron'])
                   + par['alpha_neuron']*(rnn_input @ weights['w_in']
                   + h_post @ weights['w_rnn'] + weights['b_rnn'])
                   + np.random.normal(0, par['noise_rnn'],size = h.shape))
    # zero out (or scale) suppressed units for the lesion analysis
    h *= suppress_activity
    if par['synapse_config'] is None:
        # keep returned shapes consistent even when plasticity is disabled
        syn_x_new = np.ones_like(h)
        syn_u_new = np.ones_like(h)
    return h, syn_x_new, syn_u_new
def get_perf(target, output, mask):
    """Calculate task accuracy by comparing network output to the target.

    target, output -- arrays of shape (time, trial, output); decisions are
        taken as the argmax over the output dimension
    mask -- (time, trial) training mask; only positive entries count
    Overall accuracy is scored only where the fixation channel is off
    (target[:,:,0] == 0, i.e. while the test stimulus is on); non-match and
    match accuracies use their respective target channels.
    Returns (accuracy, accuracy_non_match, accuracy_match).
    """
    active = np.float32(mask > 0)
    # Per-condition scoring masks derived from the one-hot target channels
    test_mask = active * (target[:, :, 0] == 0)
    non_match_mask = active * (target[:, :, 1] == 1)
    match_mask = active * (target[:, :, 2] == 1)
    # 1.0 wherever the network's decision agrees with the target
    correct = np.float32(np.argmax(target, axis=2) == np.argmax(output, axis=2))
    accuracy = np.sum(correct * test_mask) / np.sum(test_mask)
    accuracy_non_match = np.sum(correct * np.squeeze(non_match_mask)) / np.sum(non_match_mask)
    accuracy_match = np.sum(correct * np.squeeze(match_mask)) / np.sum(match_mask)
    return accuracy, accuracy_non_match, accuracy_match
| nmasse/Short-term-plasticity-RNN | analysis.py | Python | apache-2.0 | 49,221 | [
"NEURON"
] | 4c87ef0ad79a5554052800e412bd791659d5be4d85bb0f7741180aabec70bfcf |
"""Functions supports the creation of general 1 and n-dim profiles including
normal and sersic profiles
TODO:
Limitations:
1. The sersic profile assumes radial symmetry
"""
import numpy as np
class FunctionsError(Exception):
    """Raised when a profile function receives inconsistent arguments."""
    pass
def Normal(arr, mean, sigma, scale=1):
    """Return the normal distribution of N dimensions

    arr--The input array of N dimensions
    mean--the mean of the distribution--either a scalar or N-dimensional array
    sigma--the width of the distribution--either a scalar or N-dimensional array
    scale--the scale of the distribution

    Raises FunctionsError if an array-valued mean or sigma does not match
    the dimensionality of arr.

    NOTE(review): the exponent divides by sigma, not sigma**2, so for
    sigma != 1 this is not a unit-variance Gaussian in the std-deviation
    sense -- confirm whether sigma is meant to be the variance.
    """
    # (removed the no-op self-assignments arr = arr, mean = mean, etc.)
    dim = np.ndim(arr) - 1
    # check to make sure that mean and sigma have dimensions that match dim
    # and reshape them so they broadcast over the trailing (grid) axes
    if isinstance(mean, np.ndarray):
        if len(mean) != dim:
            raise FunctionsError('Mean and input array are different number of dimensions')
        mean = np.reshape(mean, (dim, 1, 1))
    if isinstance(sigma, np.ndarray):
        if len(sigma) != dim:
            raise FunctionsError('Sigma and input array are different number of dimensions')
        sigma = np.reshape(sigma, (dim, 1, 1))
    # calculate the gaussian
    z = scale * np.exp(-0.5 * (arr - mean) ** 2 / sigma)
    return z
def sersic(arr, deg, r_e, ell=1, scale=1):
    """Produce a light distribution given by a sersic profile

    I = scale * exp(-ell * ((R/r_e)**(1/deg) - 1))

    arr--input coordinate array; 1-D arrays are treated as radial
        coordinates directly, higher-dimensional arrays as a stack of
        coordinate planes along axis 0 (radial symmetry assumed)
    deg--the sersic index n
    r_e--the effective radius
    ell--scaling constant in the exponent (the classical b_n factor)
    scale--overall amplitude
    """
    dim = np.ndim(arr) - 1
    # assume radial symmetry
    if dim == 0:
        r = abs(arr / r_e)
    else:
        # Previously this branch left ``r`` unset and raised NameError.
        # Radius via quadrature sum over the axis-0 coordinate planes.
        r = np.sqrt(np.sum(arr ** 2, axis=0)) / r_e
    z = scale * np.exp(-ell * (r ** (1 / deg) - 1))
    return z
| crawfordsm/pyspectrograph | PySpectrograph/Utilities/Functions.py | Python | bsd-3-clause | 1,782 | [
"Gaussian"
] | 4e4cd36252b57c9df0b3eca2c1d868a8fca72197df4d00cc1e0ad4793bc76421 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# grep - text search
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Emulate the un*x function with the same name"""
import os
import glob
import re
import time
import shared.returnvalues as returnvalues
from shared.base import client_id_dir
from shared.functional import validate_input_and_cert, REJECT_UNSET
from shared.init import initialize_main_variables
from shared.parseflags import verbose, binary
from shared.validstring import valid_user_path
def signature():
    """Signature of the main function"""
    defaults = {
        'path': REJECT_UNSET,
        'pattern': REJECT_UNSET,
        'flags': [''],
        }
    return ['file_output', defaults]
def pattern_match_file(pattern, filename, allowed_time=5.0):
    """Return lines in file which match the provided pattern

    pattern--regular expression searched against each stripped line
    filename--path of the file to scan
    allowed_time--time budget in seconds; scanning stops (returning the
        matches found so far) once it is exceeded

    The file handle is now closed via a with-statement even if reading or
    the regex search raises (the old code leaked the handle on error).
    """
    fit = []
    start_time = time.time()
    with open(filename, 'r') as file_fd:
        for line in file_fd:
            # Allow comparison to take up to allowed_time seconds
            if time.time() - start_time > allowed_time:
                break
            if re.search(pattern, line.strip()):
                fit.append(line)
    return fit
def main(client_id, user_arguments_dict):
    """Main function used by front end

    client_id -- certificate identity of the calling user
    user_arguments_dict -- raw request arguments; validated against the
        defaults from signature() before use
    Returns the usual (output_objects, status) tuple.
    """
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    status = returnvalues.OK
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)
    flags = ''.join(accepted['flags'])
    patterns = accepted['path']
    # only the last supplied pattern argument is used as the search regex
    search = accepted['pattern'][-1]
    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name
    base_dir = os.path.abspath(os.path.join(configuration.user_home,
                               client_dir)) + os.sep
    if verbose(flags):
        for flag in flags:
            output_objects.append({'object_type': 'text', 'text'
                                  : '%s using flag: %s' % (op_name,
                                  flag)})
    for pattern in patterns:
        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages
        unfiltered_match = glob.glob(base_dir + pattern)
        match = []
        for server_path in unfiltered_match:
            real_path = os.path.abspath(server_path)
            if not valid_user_path(real_path, base_dir, True):
                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.
                logger.warning('%s tried to %s restricted path %s! (%s)'
                               % (client_id, op_name, real_path, pattern))
                continue
            match.append(real_path)
        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match
        if not match:
            output_objects.append({'object_type': 'file_not_found',
                                  'name': pattern})
            status = returnvalues.FILE_NOT_FOUND
        for real_path in match:
            relative_path = real_path.replace(base_dir, '')
            output_lines = []
            try:
                matching = pattern_match_file(search, real_path)
                for line in matching:
                    output_lines.append(line)
            except Exception, exc:
                # report the failure for this file but keep processing the
                # remaining matches
                output_objects.append({'object_type': 'error_text',
                        'text': "%s: '%s': %s" % (op_name,
                        relative_path, exc)})
                logger.error("%s: failed on '%s': %s" % (op_name,
                             relative_path, exc))
                status = returnvalues.SYSTEM_ERROR
                continue
            entry = {'object_type': 'file_output',
                     'lines': output_lines,
                     'wrap_binary': binary(flags),
                     'wrap_targets': ['lines']}
            if verbose(flags):
                entry['path'] = relative_path
            output_objects.append(entry)
    return (output_objects, status)
| heromod/migrid | mig/shared/functionality/grep.py | Python | gpl-2.0 | 5,569 | [
"Brian"
] | 77d9a0ecbabe243b68f23c370a8ee5b8fee02dc38d29c064576b86dadcb3d752 |
import oauth2 as oauth
import time
import json
import traceback
import config as cfg
# OAuth1 client built from the credentials in config.py
consumer = oauth.Consumer(key=cfg.twitter['consumer_key'], secret=cfg.twitter['consumer_secret'])
access_token = oauth.Token(key=cfg.twitter['access_token'], secret=cfg.twitter['access_token_secret'])
client = oauth.Client(consumer, access_token)
# Accounts whose friends (followed user ids) will be listed
screen_names=['jack','biz','noah','crystal','jeremy']
for screen_name in screen_names:
    # Twitter cursors paginate: -1 means first page, 0 means no more pages
    next_cursor=-1
    while next_cursor!=0:
        try:
            if next_cursor<0:
                timeline_endpoint = "https://api.twitter.com/1.1/friends/ids.json?count=5000&cursor=-1&screen_name=" + screen_name
            else:
                timeline_endpoint = "https://api.twitter.com/1.1/friends/ids.json?count=5000&cursor=" + str(next_cursor) +"&screen_name=" + screen_name
            response, data = client.request(timeline_endpoint)
            print(response['status'])
            if response['status']=='200':
                dataResult = json.loads(data)
                next_cursor = dataResult['next_cursor']
                for userFollower in dataResult['ids']:
                    print(userFollower)
            elif response['status']=='400' or response['status']=='403' or response['status']=='404' or response['status']=='401':
                # client error (bad request / forbidden / missing / unauthorized):
                # give up on this account
                next_cursor=0
            else:
                # transient error: restart pagination from the beginning
                next_cursor=-1
            # Pace requests to respect the 15-minute rate-limit window.
            # NOTE(review): nesting of this block was ambiguous in the
            # flattened source -- it may have applied only to the error
            # branch above; confirm against the original script.
            if 'x-rate-limit-limit' in response:
                print("wait for ",(15*60)/int(response['x-rate-limit-limit'])," seconds")
                time.sleep((15*60)/int(response['x-rate-limit-limit']))
            else:
                print(response)
                print("wait for ",(15*60)," seconds")
                time.sleep((15*60))
        except Exception:
            # network/parse failure: log and retry after a minute
            print(traceback.format_exc())
            time.sleep(60)
"CRYSTAL"
] | fdff25b6e916b5684da1097a686ce0336999e68f948101fef72809de0dd166fe |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest
import os
import json
import numpy as np
import warnings
import xml.etree.cElementTree as ET
from pymatgen.core.periodic_table import Element
from pymatgen.electronic_structure.core import OrbitalType
from pymatgen.io.vasp.inputs import Kpoints
from pymatgen.io.vasp.outputs import Chgcar, Locpot, Oszicar, Outcar, \
Vasprun, Procar, Xdatcar, Dynmat, BSVasprun, UnconvergedVASPWarning, \
Wavecar
from pymatgen import Spin, Orbital, Lattice, Structure
from pymatgen.entries.compatibility import MaterialsProjectCompatibility
from pymatgen.electronic_structure.core import Magmom
from pymatgen.util.testing import PymatgenTest
"""
Created on Jul 16, 2012
"""
__author__ = "Shyue Ping Ong, Stephen Dacek, Mark Turiansky"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Jul 16, 2012"
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class VasprunTest(unittest.TestCase):
def setUp(self):
    # Silence the many deprecation/user warnings emitted while parsing the
    # fixture files so test output stays readable.
    warnings.simplefilter("ignore")
def tearDown(self):
    # Restore the default warning filters so other test cases see warnings.
    warnings.resetwarnings()
def test_multiple_dielectric(self):
    """A GW0 run should expose all three extra dielectric functions."""
    run = Vasprun(os.path.join(test_dir, "vasprun.GW0.xml"))
    self.assertEqual(len(run.other_dielectric), 3)
def test_charge_charge_dielectric(self):
    """
    VASP 5.4.4 writes out two dielectric functions to vasprun.xml
    These are the "density-density" and "velocity-velocity" linear response functions.
    See the comments in `linear_optics.F` for details.
    """
    v = Vasprun(os.path.join(test_dir, "vasprun.xml.dielectric_5.4.4"),
                parse_potcar_file=False)
    # Use the dedicated unittest asserts instead of assertEqual(expr, True)
    # for clearer failure messages.
    self.assertIsNotNone(v.dielectric)
    self.assertIn('density', v.dielectric_data)
    self.assertIn('velocity', v.dielectric_data)
def test_optical_absorption_coeff(self):
    """Spot-check the derived optical absorption coefficient from a BSE run."""
    run = Vasprun(os.path.join(test_dir, "vasprun.BSE.xml.gz"))
    self.assertEqual(run.optical_absorption_coeff[1], 24966408728.917931)
def test_vasprun_with_more_than_two_unlabelled_dielectric_functions(self):
    """More than two unlabelled dielectric functions is not supported."""
    bad_path = os.path.join(test_dir, "vasprun.xml.dielectric_bad")
    with self.assertRaises(NotImplementedError):
        Vasprun(bad_path, parse_potcar_file=False)
def test_bad_vasprun(self):
    """Malformed XML raises by default; with exception_on_bad_xml=False it
    degrades to a UserWarning and parses what it can."""
    self.assertRaises(ET.ParseError,
                      Vasprun, os.path.join(test_dir, "bad_vasprun.xml"))
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        # Trigger a warning.
        v = Vasprun(os.path.join(test_dir, "bad_vasprun.xml"),
                    exception_on_bad_xml=False)
        # Verify some things: the truncated file still yields one ionic step
        self.assertEqual(len(v.ionic_steps), 1)
        self.assertAlmostEqual(v.final_energy, -269.00551374)
        self.assertTrue(issubclass(w[-1].category,
                                   UserWarning))
def test_vdw(self):
    """Final energy parses correctly from a vdW-corrected run."""
    run = Vasprun(os.path.join(test_dir, "vasprun.xml.vdw"))
    self.assertAlmostEqual(run.final_energy, -9.78310677)
def test_properties(self):
    """End-to-end parse checks over many fixture vasprun.xml variants:
    DOS/PDOS, ionic steps, band properties, POTCAR symbols, ionic-step
    skipping/offset, GGA+U, unconverged runs, DFPT dielectric tensors,
    uniform k-points, dielectric functions and optical transitions."""
    filepath = os.path.join(test_dir, 'vasprun.xml.nonlm')
    vasprun = Vasprun(filepath, parse_potcar_file=False)
    orbs = list(vasprun.complete_dos.pdos[vasprun.final_structure[
        0]].keys())
    self.assertIn(OrbitalType.s, orbs)
    filepath = os.path.join(test_dir, 'vasprun.xml')
    vasprun = Vasprun(filepath, parse_potcar_file=False)
    # Test NELM parsing.
    self.assertEqual(vasprun.parameters["NELM"], 60)
    # test pdos parsing
    pdos0 = vasprun.complete_dos.pdos[vasprun.final_structure[0]]
    self.assertAlmostEqual(pdos0[Orbital.s][Spin.up][16], 0.0026)
    self.assertAlmostEqual(pdos0[Orbital.pz][Spin.down][16], 0.0012)
    self.assertEqual(pdos0[Orbital.s][Spin.up].shape, (301,))
    filepath2 = os.path.join(test_dir, 'lifepo4.xml')
    vasprun_ggau = Vasprun(filepath2, parse_projected_eigen=True,
                           parse_potcar_file=False)
    totalscsteps = sum([len(i['electronic_steps'])
                        for i in vasprun.ionic_steps])
    self.assertEqual(29, len(vasprun.ionic_steps))
    self.assertEqual(len(vasprun.structures), len(vasprun.ionic_steps))
    self.assertEqual(vasprun.lattice,
                     vasprun.lattice_rec.reciprocal_lattice)
    for i, step in enumerate(vasprun.ionic_steps):
        self.assertEqual(vasprun.structures[i], step["structure"])
    self.assertTrue(all([vasprun.structures[i] == vasprun.ionic_steps[i][
        "structure"] for i in range(len(vasprun.ionic_steps))]))
    self.assertEqual(308, totalscsteps,
                     "Incorrect number of energies read from vasprun.xml")
    self.assertEqual(['Li'] + 4 * ['Fe'] + 4 * ['P'] + 16 * ["O"],
                     vasprun.atomic_symbols)
    self.assertEqual(vasprun.final_structure.composition.reduced_formula,
                     "LiFe4(PO4)4")
    self.assertIsNotNone(vasprun.incar, "Incar cannot be read")
    self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
    self.assertIsNotNone(vasprun.eigenvalues, "Eigenvalues cannot be read")
    self.assertAlmostEqual(vasprun.final_energy, -269.38319884, 7)
    self.assertAlmostEqual(vasprun.tdos.get_gap(), 2.0589, 4)
    # band properties: (gap, cbm, vbm, is_direct)
    expectedans = (2.539, 4.0906, 1.5516, False)
    (gap, cbm, vbm, direct) = vasprun.eigenvalue_band_properties
    self.assertAlmostEqual(gap, expectedans[0])
    self.assertAlmostEqual(cbm, expectedans[1])
    self.assertAlmostEqual(vbm, expectedans[2])
    self.assertEqual(direct, expectedans[3])
    self.assertFalse(vasprun.is_hubbard)
    self.assertEqual(vasprun.potcar_symbols,
                     ['PAW_PBE Li 17Jan2003', 'PAW_PBE Fe 06Sep2000',
                      'PAW_PBE Fe 06Sep2000', 'PAW_PBE P 17Jan2003',
                      'PAW_PBE O 08Apr2002'])
    self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
    self.assertIsNotNone(vasprun.actual_kpoints,
                         "Actual kpoints cannot be read")
    self.assertIsNotNone(vasprun.actual_kpoints_weights,
                         "Actual kpoints weights cannot be read")
    for atomdoses in vasprun.pdos:
        for orbitaldos in atomdoses:
            self.assertIsNotNone(orbitaldos, "Partial Dos cannot be read")
    # test skipping ionic steps.
    vasprun_skip = Vasprun(filepath, 3, parse_potcar_file=False)
    self.assertEqual(vasprun_skip.nionic_steps, 29)
    self.assertEqual(len(vasprun_skip.ionic_steps),
                     int(vasprun.nionic_steps / 3) + 1)
    self.assertEqual(len(vasprun_skip.ionic_steps),
                     len(vasprun_skip.structures))
    self.assertEqual(len(vasprun_skip.ionic_steps),
                     int(vasprun.nionic_steps / 3) + 1)
    # Check that nionic_steps is preserved no matter what.
    self.assertEqual(vasprun_skip.nionic_steps,
                     vasprun.nionic_steps)
    self.assertNotAlmostEqual(vasprun_skip.final_energy,
                              vasprun.final_energy)
    # Test with ionic_step_offset
    vasprun_offset = Vasprun(filepath, 3, 6, parse_potcar_file=False)
    self.assertEqual(len(vasprun_offset.ionic_steps),
                     int(len(vasprun.ionic_steps) / 3) - 1)
    self.assertEqual(vasprun_offset.structures[0],
                     vasprun_skip.structures[2])
    # GGA+U fixture: hubbard flags and projected eigenvalues
    self.assertTrue(vasprun_ggau.is_hubbard)
    self.assertEqual(vasprun_ggau.hubbards["Fe"], 4.3)
    self.assertAlmostEqual(vasprun_ggau.projected_eigenvalues[Spin.up][
        0][0][96][0], 0.0032)
    d = vasprun_ggau.as_dict()
    self.assertEqual(d["elements"], ["Fe", "Li", "O", "P"])
    self.assertEqual(d["nelements"], 4)
    # An unconverged run should warn but still report convergence flags
    filepath = os.path.join(test_dir, 'vasprun.xml.unconverged')
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        # Trigger a warning.
        vasprun_unconverged = Vasprun(filepath, parse_potcar_file=False)
        # Verify some things
        self.assertEqual(len(w), 1)
        self.assertTrue(issubclass(w[-1].category,
                                   UnconvergedVASPWarning))
        self.assertTrue(vasprun_unconverged.converged_ionic)
        self.assertFalse(vasprun_unconverged.converged_electronic)
        self.assertFalse(vasprun_unconverged.converged)
    # DFPT: static dielectric tensors (with and without local field effects)
    filepath = os.path.join(test_dir, 'vasprun.xml.dfpt')
    vasprun_dfpt = Vasprun(filepath, parse_potcar_file=False)
    self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][0], 3.26105533)
    self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][1], -0.00459066)
    self.assertAlmostEqual(vasprun_dfpt.epsilon_static[2][2], 3.24330517)
    self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][0],
                           3.33402531)
    self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][1],
                           -0.00559998)
    self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[2][2],
                           3.31237357)
    self.assertTrue(vasprun_dfpt.converged)
    entry = vasprun_dfpt.get_computed_entry()
    entry = MaterialsProjectCompatibility(
        check_potcar_hash=False).process_entry(entry)
    self.assertAlmostEqual(entry.uncorrected_energy + entry.correction,
                           entry.energy)
    # DFPT: ionic contribution to the dielectric tensor
    filepath = os.path.join(test_dir, 'vasprun.xml.dfpt.ionic')
    vasprun_dfpt_ionic = Vasprun(filepath, parse_potcar_file=False)
    self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][0],
                           515.73485838)
    self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][1],
                           -0.00263523)
    self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[2][2],
                           19.02110169)
    filepath = os.path.join(test_dir, 'vasprun.xml.dfpt.unconverged')
    vasprun_dfpt_unconv = Vasprun(filepath, parse_potcar_file=False)
    self.assertFalse(vasprun_dfpt_unconv.converged_electronic)
    self.assertTrue(vasprun_dfpt_unconv.converged_ionic)
    self.assertFalse(vasprun_dfpt_unconv.converged)
    vasprun_uniform = Vasprun(os.path.join(test_dir, "vasprun.xml.uniform"),
                              parse_potcar_file=False)
    self.assertEqual(vasprun_uniform.kpoints.style,
                     Kpoints.supported_modes.Reciprocal)
    # A run with no projected DOS should still build a complete DOS
    vasprun_no_pdos = Vasprun(os.path.join(test_dir, "Li_no_projected.xml"),
                              parse_potcar_file=False)
    self.assertIsNotNone(vasprun_no_pdos.complete_dos)
    self.assertFalse(vasprun_no_pdos.dos_has_errors)
    # Frequency-dependent dielectric function spot checks
    vasprun_diel = Vasprun(os.path.join(test_dir, "vasprun.xml.dielectric"),
                           parse_potcar_file=False)
    self.assertAlmostEqual(0.4294, vasprun_diel.dielectric[0][10])
    self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][0])
    self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][1])
    self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][2])
    self.assertAlmostEqual(0.0, vasprun_diel.dielectric[1][51][3])
    self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][0])
    self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][1])
    self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][2])
    self.assertAlmostEqual(0.0, vasprun_diel.dielectric[2][85][3])
    v = Vasprun(os.path.join(test_dir, "vasprun.xml.indirect.gz"))
    (gap, cbm, vbm, direct) = v.eigenvalue_band_properties
    self.assertFalse(direct)
    # Optical transition energies and intensities
    vasprun_optical = Vasprun(
        os.path.join(test_dir, "vasprun.xml.opticaltransitions"),
        parse_potcar_file=False)
    self.assertAlmostEqual(3.084, vasprun_optical.optical_transition[0][0])
    self.assertAlmostEqual(3.087, vasprun_optical.optical_transition[3][0])
    self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[0][1])
    self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[1][1])
    self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[7][1])
    self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[19][1])
    self.assertAlmostEqual(3.3799999999,
                           vasprun_optical.optical_transition[54][0])
    self.assertAlmostEqual(3.381, vasprun_optical.optical_transition[55][0])
    self.assertAlmostEqual(3.381, vasprun_optical.optical_transition[56][0])
    self.assertAlmostEqual(10554.9860,
                           vasprun_optical.optical_transition[54][1])
    self.assertAlmostEqual(0.0, vasprun_optical.optical_transition[55][1])
    self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[56][1])
def test_force_constants(self):
    """DFPT phonon run: force constants, normal-mode eigenvalues and eigenvectors."""
    vasprun_fc = Vasprun(os.path.join(test_dir, "vasprun.xml.dfpt.phonon"),
                         parse_potcar_file=False)
    # Expected 3x3 force-constant sub-block coupling atoms 8 and 9.
    fc_ans = [[-0.00184451, -0., -0.],
              [-0., -0.00933824, -0.03021279],
              [-0., -0.03021279, 0.01202547]]
    # Expected eigenvector of normal mode 33 (16 atoms x 3 Cartesian components).
    nm_ans = [[0.0884346, -0.08837289, -0.24995639],
              [-0.0884346, 0.08837289, 0.24995639],
              [0.15306645, -0.05105771, -0.14441306],
              [-0.15306645, 0.05105771, 0.14441306],
              [-0.0884346, 0.08837289, 0.24995639],
              [0.0884346, -0.08837289, -0.24995639],
              [-0.15306645, 0.05105771, 0.14441306],
              [0.15306645, -0.05105771, -0.14441306],
              [-0.0884346, 0.08837289, 0.24995639],
              [0.0884346, -0.08837289, -0.24995639],
              [-0.15306645, 0.05105771, 0.14441306],
              [0.15306645, -0.05105771, -0.14441306],
              [0.0884346, -0.08837289, -0.24995639],
              [-0.0884346, 0.08837289, 0.24995639],
              [0.15306645, -0.05105771, -0.14441306],
              [-0.15306645, 0.05105771, 0.14441306]]
    # Expected slice [17:29] of the 48 normal-mode eigenvalues.
    nm_eigenval_ans = [-0.59067079, -0.59067079, -0.59067003, -0.59067003,
                       -0.59067003, -0.59067003, -0.585009, -0.585009,
                       -0.58500895, -0.58500883, -0.5062956, -0.5062956]
    self.assertEqual(vasprun_fc.force_constants.shape, (16, 16, 3, 3))
    self.assertTrue(np.allclose(vasprun_fc.force_constants[8, 9], fc_ans))
    # 16 atoms x 3 dof = 48 normal modes expected.
    self.assertEqual(vasprun_fc.normalmode_eigenvals.size, 48)
    self.assertTrue(np.allclose(vasprun_fc.normalmode_eigenvals[17:29],
                                nm_eigenval_ans))
    self.assertEqual(vasprun_fc.normalmode_eigenvecs.shape, (48, 16, 3))
    self.assertTrue(
        np.allclose(vasprun_fc.normalmode_eigenvecs[33], nm_ans))
def test_Xe(self):
    """A single-element Xe vasprun reports the correct atomic symbols."""
    xml_path = os.path.join(test_dir, 'vasprun.xml.xe')
    run = Vasprun(xml_path, parse_potcar_file=False)
    self.assertEqual(run.atomic_symbols, ['Xe'])
def test_invalid_element(self):
    """Parsing a vasprun containing an unknown species must raise ValueError."""
    bad_xml = os.path.join(test_dir, 'vasprun.xml.wrong_sp')
    with self.assertRaises(ValueError):
        Vasprun(bad_xml)
def test_selective_dynamics(self):
    """Selective-dynamics flags survive parsing into site_properties."""
    parsed = Vasprun(os.path.join(test_dir, 'vasprun.xml.indirect.gz'))
    sd_flags = parsed.final_structure.site_properties.get('selective_dynamics')
    expected = [[True, True, True], [False, False, False]]
    np.testing.assert_array_equal(sd_flags, expected,
                                  "Selective dynamics parsing error")
def test_as_dict(self):
    """as_dict() must be JSON-serializable and record the POTCAR types."""
    filepath = os.path.join(test_dir, 'vasprun.xml')
    vasprun = Vasprun(filepath, parse_potcar_file=False)
    # Round-trippable through json proves the dict contains only primitives.
    self.assertIsNotNone(json.dumps(vasprun.as_dict()))
    self.assertEqual(vasprun.as_dict()["input"]["potcar_type"],
                     ['PAW_PBE'] * 5)
def test_get_band_structure(self):
    """Band structure from vasprun + KPOINTS: CBM/VBM energies, indices, labels, projections."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        filepath = os.path.join(test_dir, 'vasprun_Si_bands.xml')
        vasprun = Vasprun(filepath,
                          parse_projected_eigen=True,
                          parse_potcar_file=False)
        bs = vasprun.get_band_structure(kpoints_filename=os.path.join(test_dir,
                                                                      'KPOINTS_Si_bands'))
        cbm = bs.get_cbm()
        vbm = bs.get_vbm()
        self.assertEqual(cbm['kpoint_index'], [13],
                         "wrong cbm kpoint index")
        self.assertAlmostEqual(cbm['energy'], 6.2301, "wrong cbm energy")
        self.assertEqual(cbm['band_index'], {Spin.up: [4], Spin.down: [4]},
                         "wrong cbm bands")
        # VBM is triply degenerate in k (Gamma appears at 0, 63, 64).
        self.assertEqual(vbm['kpoint_index'], [0, 63, 64])
        self.assertAlmostEqual(vbm['energy'], 5.6158, "wrong vbm energy")
        self.assertEqual(vbm['band_index'], {Spin.up: [1, 2, 3],
                                             Spin.down: [1, 2, 3]},
                         "wrong vbm bands")
        self.assertEqual(vbm['kpoint'].label, "\\Gamma", "wrong vbm label")
        self.assertEqual(cbm['kpoint'].label, None, "wrong cbm label")
        # Element-resolved and orbital-resolved projections at the first k-point/band.
        projected = bs.get_projection_on_elements()
        self.assertAlmostEqual(projected[Spin.up][0][0]["Si"], 0.4238)
        projected = bs.get_projections_on_elements_and_orbitals(
            {"Si": ["s"]})
        self.assertAlmostEqual(projected[Spin.up][0][0]["Si"]["s"], 0.4238)
def test_sc_step_overflow(self):
    """An overflowed (non-numeric) energy in a self-consistency step parses as NaN."""
    overflow_xml = os.path.join(test_dir, 'vasprun.xml.sc_overflow')
    vasprun = Vasprun(overflow_xml)
    step = vasprun.ionic_steps[0]['electronic_steps'][29]
    self.assertTrue(np.isnan(step['e_wo_entrp']))
def test_update_potcar(self):
    """update_potcar_spec fills in hashes from a matching POTCAR and rejects a mismatch."""
    filepath = os.path.join(test_dir, 'vasprun.xml')
    potcar_path = os.path.join(test_dir, 'POTCAR.LiFePO4.gz')
    # Deliberately non-matching POTCAR, used to check the ValueError path.
    potcar_path2 = os.path.join(test_dir, 'POTCAR2.LiFePO4.gz')
    vasprun = Vasprun(filepath, parse_potcar_file=False)
    # Without a POTCAR the spec has titles but no hashes.
    self.assertEqual(vasprun.potcar_spec,
                     [{"titel": "PAW_PBE Li 17Jan2003", "hash": None},
                      {"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
                      {"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
                      {"titel": "PAW_PBE P 17Jan2003", "hash": None},
                      {"titel": "PAW_PBE O 08Apr2002", "hash": None}])
    vasprun.update_potcar_spec(potcar_path)
    self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003",
                                            "hash": "65e83282d1707ec078c1012afbd05be8"},
                                           {"titel": "PAW_PBE Fe 06Sep2000",
                                            "hash": "9530da8244e4dac17580869b4adab115"},
                                           {"titel": "PAW_PBE Fe 06Sep2000",
                                            "hash": "9530da8244e4dac17580869b4adab115"},
                                           {"titel": "PAW_PBE P 17Jan2003",
                                            "hash": "7dc3393307131ae67785a0cdacb61d5f"},
                                           {"titel": "PAW_PBE O 08Apr2002",
                                            "hash": "7a25bc5b9a5393f46600a4939d357982"}])
    # A POTCAR that does not match the run must be rejected.
    vasprun2 = Vasprun(filepath, parse_potcar_file=False)
    self.assertRaises(ValueError, vasprun2.update_potcar_spec, potcar_path2)
    # Passing the POTCAR path directly at construction also populates hashes.
    vasprun = Vasprun(filepath, parse_potcar_file=potcar_path)
    self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003",
                                            "hash": "65e83282d1707ec078c1012afbd05be8"},
                                           {"titel": "PAW_PBE Fe 06Sep2000",
                                            "hash": "9530da8244e4dac17580869b4adab115"},
                                           {"titel": "PAW_PBE Fe 06Sep2000",
                                            "hash": "9530da8244e4dac17580869b4adab115"},
                                           {"titel": "PAW_PBE P 17Jan2003",
                                            "hash": "7dc3393307131ae67785a0cdacb61d5f"},
                                           {"titel": "PAW_PBE O 08Apr2002",
                                            "hash": "7a25bc5b9a5393f46600a4939d357982"}])
    self.assertRaises(ValueError, Vasprun, filepath,
                      parse_potcar_file=potcar_path2)
def test_search_for_potcar(self):
    """With parse_potcar_file=True, a matching POTCAR is located automatically and hashed."""
    filepath = os.path.join(test_dir, 'vasprun.xml')
    vasprun = Vasprun(filepath, parse_potcar_file=True)
    self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003",
                                            "hash": "65e83282d1707ec078c1012afbd05be8"},
                                           {"titel": "PAW_PBE Fe 06Sep2000",
                                            "hash": "9530da8244e4dac17580869b4adab115"},
                                           {"titel": "PAW_PBE Fe 06Sep2000",
                                            "hash": "9530da8244e4dac17580869b4adab115"},
                                           {"titel": "PAW_PBE P 17Jan2003",
                                            "hash": "7dc3393307131ae67785a0cdacb61d5f"},
                                           {"titel": "PAW_PBE O 08Apr2002",
                                            "hash": "7a25bc5b9a5393f46600a4939d357982"}])
def test_potcar_not_found(self):
    """When no matching POTCAR is found on the search path, hashes stay None and warnings fire."""
    filepath = os.path.join(test_dir, 'vasprun.xml')
    # Ensure no potcar is found and nothing is updated
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        vasprun = Vasprun(filepath, parse_potcar_file='.')
        self.assertEqual(len(caught), 2)
        titels = ["PAW_PBE Li 17Jan2003", "PAW_PBE Fe 06Sep2000",
                  "PAW_PBE Fe 06Sep2000", "PAW_PBE P 17Jan2003",
                  "PAW_PBE O 08Apr2002"]
        expected = [{"titel": titel, "hash": None} for titel in titels]
        self.assertEqual(vasprun.potcar_spec, expected)
def test_parsing_chemical_shift_calculations(self):
    """An NMR chemical-shift run parses its SC steps and reports convergence."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        cs_xml = os.path.join(test_dir, "nmr", "cs", "basic",
                              'vasprun.xml.chemical_shift.scstep')
        run = Vasprun(cs_xml)
        n_steps = len(run.ionic_steps[-1]['electronic_steps'])
        self.assertEqual(n_steps, 10)
        self.assertTrue(run.converged)
def test_parsing_efg_calcs(self):
    """An EFG (electric field gradient) run parses its SC steps and reports convergence."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        efg_xml = os.path.join(test_dir, "nmr", "efg", "AlPO4",
                               'vasprun.xml')
        run = Vasprun(efg_xml)
        n_steps = len(run.ionic_steps[-1]['electronic_steps'])
        self.assertEqual(n_steps, 18)
        self.assertTrue(run.converged)
def test_charged_structure(self):
    """NELECT above the neutral electron count yields a positively charged structure."""
    charged_xml = os.path.join(test_dir, 'vasprun.charged.xml')
    si_potcar = os.path.join(test_dir, 'POT_GGA_PAW_PBE', 'POTCAR.Si.gz')
    run = Vasprun(charged_xml, parse_potcar_file=False)
    run.update_charge_from_potcar(si_potcar)
    self.assertEqual(run.parameters.get("NELECT", 8), 9)
    self.assertEqual(run.structures[0].charge, 1)
class OutcarTest(PymatgenTest):
    """Tests for the Outcar parser: magnetization/charge, dielectric and piezo
    tensors, NMR data (chemical shielding, EFG), drift, and potentials."""

    def test_init(self):
        """Basic OUTCAR parsing (plain + gzipped), LEPSILON tensors, and SOC magnetization."""
        for f in ['OUTCAR', 'OUTCAR.gz']:
            filepath = os.path.join(test_dir, f)
            outcar = Outcar(filepath)
            # Per-site orbital-resolved magnetization for 7 atoms.
            expected_mag = ({'d': 0.0, 'p': 0.003, 's': 0.002, 'tot': 0.005},
                            {'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},
                            {'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},
                            {'d': 0.0, 'p': -0.117, 's': 0.005, 'tot': -0.112},
                            {'d': 0.0, 'p': -0.165, 's': 0.004, 'tot': -0.162},
                            {'d': 0.0, 'p': -0.117, 's': 0.005, 'tot': -0.112},
                            {'d': 0.0, 'p': -0.165, 's': 0.004, 'tot': -0.162})
            # Per-site orbital-resolved charges for the same atoms.
            expected_chg = ({'p': 0.154, 's': 0.078, 'd': 0.0, 'tot': 0.232},
                            {'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},
                            {'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},
                            {'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},
                            {'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947},
                            {'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},
                            {'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947})
            self.assertAlmostEqual(outcar.magnetization, expected_mag, 5,
                                   "Wrong magnetization read from Outcar")
            self.assertAlmostEqual(outcar.charge, expected_chg, 5,
                                   "Wrong charge read from Outcar")
            self.assertFalse(outcar.is_stopped)
            self.assertEqual(outcar.run_stats, {'System time (sec)': 0.938,
                                                'Total CPU time used (sec)': 545.142,
                                                'Elapsed time (sec)': 546.709,
                                                'Maximum memory used (kb)': 0.0,
                                                'Average memory used (kb)': 0.0,
                                                'User time (sec)': 544.204,
                                                'cores': '8'})
            self.assertAlmostEqual(outcar.efermi, 2.0112)
            self.assertAlmostEqual(outcar.nelect, 44.9999991)
            self.assertAlmostEqual(outcar.total_mag, 0.9999998)
            self.assertIsNotNone(outcar.as_dict())
            self.assertFalse(outcar.lepsilon)
        # A run killed via STOPCAR must be flagged as stopped.
        filepath = os.path.join(test_dir, 'OUTCAR.stopped')
        outcar = Outcar(filepath)
        self.assertTrue(outcar.is_stopped)
        # LEPSILON runs: dielectric + piezo (electronic and ionic) and Born charges.
        for f in ['OUTCAR.lepsilon', 'OUTCAR.lepsilon.gz']:
            filepath = os.path.join(test_dir, f)
            outcar = Outcar(filepath)
            self.assertTrue(outcar.lepsilon)
            self.assertAlmostEqual(outcar.dielectric_tensor[0][0], 3.716432)
            self.assertAlmostEqual(outcar.dielectric_tensor[0][1], -0.20464)
            self.assertAlmostEqual(outcar.dielectric_tensor[1][2], -0.20464)
            self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][0],
                                   0.001419)
            self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][2],
                                   0.001419)
            self.assertAlmostEqual(outcar.dielectric_ionic_tensor[2][2],
                                   0.001419)
            self.assertAlmostEqual(outcar.piezo_tensor[0][0], 0.52799)
            self.assertAlmostEqual(outcar.piezo_tensor[1][3], 0.35998)
            self.assertAlmostEqual(outcar.piezo_tensor[2][5], 0.35997)
            self.assertAlmostEqual(outcar.piezo_ionic_tensor[0][0], 0.05868)
            self.assertAlmostEqual(outcar.piezo_ionic_tensor[1][3], 0.06241)
            self.assertAlmostEqual(outcar.piezo_ionic_tensor[2][5], 0.06242)
            self.assertAlmostEqual(outcar.born[0][1][2], -0.385)
            self.assertAlmostEqual(outcar.born[1][2][0], 0.36465)
        # Non-collinear (SOC) run: magnetization entries are Magmom vectors.
        filepath = os.path.join(test_dir, 'OUTCAR.NiO_SOC.gz')
        outcar = Outcar(filepath)
        expected_mag = (
            {'s': Magmom([0.0, 0.0, -0.001]), 'p': Magmom([0.0, 0.0, -0.003]),
             'd': Magmom([0.0, 0.0, 1.674]), 'tot': Magmom([0.0, 0.0, 1.671])},
            {'s': Magmom([0.0, 0.0, 0.001]), 'p': Magmom([0.0, 0.0, 0.003]),
             'd': Magmom([0.0, 0.0, -1.674]),
             'tot': Magmom([0.0, 0.0, -1.671])},
            {'s': Magmom([0.0, 0.0, 0.0]), 'p': Magmom([0.0, 0.0, 0.0]),
             'd': Magmom([0.0, 0.0, 0.0]), 'tot': Magmom([0.0, 0.0, 0.0])},
            {'s': Magmom([0.0, 0.0, 0.0]), 'p': Magmom([0.0, 0.0, 0.0]),
             'd': Magmom([0.0, 0.0, 0.0]), 'tot': Magmom([0.0, 0.0, 0.0])}
        )
        # test note: Magmom class uses np.allclose() when testing for equality
        # so fine to use assertEqual here
        self.assertEqual(outcar.magnetization, expected_mag,
                         "Wrong vector magnetization read from Outcar for SOC calculation")

    def test_polarization(self):
        """Berry-phase polarization components (ionic, spin-resolved, electronic)."""
        filepath = os.path.join(test_dir, "OUTCAR.BaTiO3.polar")
        outcar = Outcar(filepath)
        self.assertEqual(outcar.spin, True)
        self.assertEqual(outcar.noncollinear, False)
        self.assertAlmostEqual(outcar.p_ion[0], 0.0)
        self.assertAlmostEqual(outcar.p_ion[1], 0.0)
        self.assertAlmostEqual(outcar.p_ion[2], -5.56684)
        self.assertAlmostEqual(outcar.p_sp1[0], 2.00068)
        self.assertAlmostEqual(outcar.p_sp2[0], -2.00044)
        self.assertAlmostEqual(outcar.p_elec[0], 0.00024)
        self.assertAlmostEqual(outcar.p_elec[1], 0.00019)
        self.assertAlmostEqual(outcar.p_elec[2], 3.61674)

    def test_pseudo_zval(self):
        """ZVAL (valence electron count) per species parsed from the POTCAR block."""
        filepath = os.path.join(test_dir, "OUTCAR.BaTiO3.polar")
        outcar = Outcar(filepath)
        self.assertDictEqual({'Ba': 10.00, 'Ti': 10.00, 'O': 6.00},
                             outcar.zval_dict)

    def test_dielectric(self):
        """Dipole/quadrupole correction and final energy from a dielectric run."""
        filepath = os.path.join(test_dir, "OUTCAR.dielectric")
        outcar = Outcar(filepath)
        outcar.read_corrections()
        self.assertAlmostEqual(outcar.data["dipol_quadrupol_correction"],
                               0.03565)
        self.assertAlmostEqual(outcar.final_energy, -797.46760559)

    def test_freq_dielectric(self):
        """Frequency-dependent dielectric tensor from a LOPTICS run (pre-5.4.4 format)."""
        filepath = os.path.join(test_dir, "OUTCAR.LOPTICS")
        outcar = Outcar(filepath)
        outcar.read_freq_dielectric()
        self.assertAlmostEqual(outcar.frequencies[0], 0)
        self.assertAlmostEqual(outcar.frequencies[-1], 39.826101)
        self.assertAlmostEqual(outcar.dielectric_tensor_function[0][0, 0],
                               8.96938800)
        self.assertAlmostEqual(outcar.dielectric_tensor_function[-1][0, 0],
                               7.36167000e-01 + 1.53800000e-03j)
        self.assertEqual(len(outcar.frequencies),
                         len(outcar.dielectric_tensor_function))
        # The dielectric tensor should be symmetric at each frequency.
        np.testing.assert_array_equal(outcar.dielectric_tensor_function[0],
                                      outcar.dielectric_tensor_function[
                                          0].transpose())

    def test_freq_dielectric_vasp544(self):
        """Same as test_freq_dielectric but for the VASP 5.4.4 output format."""
        filepath = os.path.join(test_dir, "OUTCAR.LOPTICS.vasp544")
        outcar = Outcar(filepath)
        outcar.read_freq_dielectric()
        self.assertAlmostEqual(outcar.frequencies[0], 0)
        self.assertAlmostEqual(outcar.frequencies[-1], 39.63964)
        self.assertAlmostEqual(outcar.dielectric_tensor_function[0][0, 0],
                               12.769435 + 0j)
        self.assertAlmostEqual(outcar.dielectric_tensor_function[-1][0, 0],
                               0.828615 + 0.016594j)
        self.assertEqual(len(outcar.frequencies),
                         len(outcar.dielectric_tensor_function))
        np.testing.assert_array_equal(outcar.dielectric_tensor_function[0],
                                      outcar.dielectric_tensor_function[
                                          0].transpose())

    def test_read_elastic_tensor(self):
        """Total elastic tensor (kBar) parsed into outcar.data."""
        filepath = os.path.join(test_dir, "OUTCAR.total_tensor.Li2O.gz")
        outcar = Outcar(filepath)
        outcar.read_elastic_tensor()
        self.assertAlmostEqual(outcar.data["elastic_tensor"][0][0], 1986.3391)
        self.assertAlmostEqual(outcar.data["elastic_tensor"][0][1], 187.8324)
        self.assertAlmostEqual(outcar.data["elastic_tensor"][3][3], 586.3034)

    def test_read_piezo_tensor(self):
        """Piezoelectric tensor parsed into outcar.data."""
        filepath = os.path.join(test_dir, "OUTCAR.lepsilon.gz")
        outcar = Outcar(filepath)
        outcar.read_piezo_tensor()
        self.assertAlmostEqual(outcar.data["piezo_tensor"][0][0], 0.52799)
        self.assertAlmostEqual(outcar.data["piezo_tensor"][1][3], 0.35998)
        self.assertAlmostEqual(outcar.data["piezo_tensor"][2][5], 0.35997)

    def test_core_state_eigen(self):
        """Core-state eigenvalues keyed by atom index and shell label."""
        filepath = os.path.join(test_dir, "OUTCAR.CL")
        cl = Outcar(filepath).read_core_state_eigen()
        self.assertAlmostEqual(cl[6]["2s"][-1], -174.4779)
        filepath = os.path.join(test_dir, "OUTCAR.icorelevel")
        cl = Outcar(filepath).read_core_state_eigen()
        self.assertAlmostEqual(cl[4]["3d"][-1], -31.4522)

    def test_avg_core_poten(self):
        """Average core potentials per ionic step."""
        filepath = os.path.join(test_dir, "OUTCAR.lepsilon")
        cp = Outcar(filepath).read_avg_core_poten()
        self.assertAlmostEqual(cp[-1][1], -90.0487)
        filepath = os.path.join(test_dir, "OUTCAR")
        cp = Outcar(filepath).read_avg_core_poten()
        self.assertAlmostEqual(cp[0][6], -73.1068)

    def test_single_atom(self):
        """Edge case: an OUTCAR with exactly one atom still parses correctly."""
        filepath = os.path.join(test_dir, "OUTCAR.Al")
        outcar = Outcar(filepath)
        expected_mag = ({u'p': 0.0, u's': 0.0, u'd': 0.0, u'tot': 0.0},)
        expected_chg = ({u'p': 0.343, u's': 0.425, u'd': 0.0, u'tot': 0.768},)
        self.assertAlmostEqual(outcar.magnetization, expected_mag)
        self.assertAlmostEqual(outcar.charge, expected_chg)
        self.assertFalse(outcar.is_stopped)
        self.assertEqual(outcar.run_stats, {'System time (sec)': 0.592,
                                            'Total CPU time used (sec)': 50.194,
                                            'Elapsed time (sec)': 52.337,
                                            'Maximum memory used (kb)': 62900.0,
                                            'Average memory used (kb)': 0.0,
                                            'User time (sec)': 49.602,
                                            'cores': '32'})
        self.assertAlmostEqual(outcar.efermi, 8.0942)
        self.assertAlmostEqual(outcar.nelect, 3)
        self.assertAlmostEqual(outcar.total_mag, 8.2e-06)
        self.assertIsNotNone(outcar.as_dict())

    def test_chemical_shielding(self):
        """NMR chemical shielding (maximally isotropic convention) for atoms 20-27."""
        filename = os.path.join(test_dir, "nmr", "cs", "core.diff",
                                "hydromagnesite", "OUTCAR")
        outcar = Outcar(filename)
        expected_chemical_shielding = [[191.9974, 69.5232, 0.6342],
                                       [195.0808, 68.183, 0.833],
                                       [192.0389, 69.5762, 0.6329],
                                       [195.0844, 68.1756, 0.8336],
                                       [192.005, 69.5289, 0.6339],
                                       [195.0913, 68.1859, 0.833],
                                       [192.0237, 69.565, 0.6333],
                                       [195.0788, 68.1733, 0.8337]]
        # NOTE(review): comparing two lengths with assertAlmostEqual works but
        # assertEqual would express the intent better.
        self.assertAlmostEqual(
            len(outcar.data["chemical_shielding"]["valence_only"][20: 28]),
            len(expected_chemical_shielding))
        self.assertArrayAlmostEqual(outcar.data["chemical_shielding"]["valence_and_core"][20:28],
                                    expected_chemical_shielding, decimal=5)

    def test_chemical_shielding_with_different_core_contribution(self):
        """Valence-only vs valence+core shieldings differ only in the isotropic part."""
        filename = os.path.join(test_dir, "nmr", "cs", "core.diff",
                                "core.diff.chemical.shifts.OUTCAR")
        outcar = Outcar(filename)
        c_vo = outcar.data["chemical_shielding"]["valence_only"][7]
        for x1, x2 in zip(list(c_vo),
                          [198.7009, 73.7484, 1.0000]):
            self.assertAlmostEqual(x1, x2)
        c_vc = outcar.data["chemical_shielding"]["valence_and_core"][7]
        for x1, x2 in zip(list(c_vc),
                          [-1.9406, 73.7484, 1.0000]):
            self.assertAlmostEqual(x1, x2)

    def test_cs_raw_tensors(self):
        """Unsymmetrized chemical-shielding tensors read verbatim."""
        filename = os.path.join(test_dir, "nmr", "cs", "core.diff",
                                "core.diff.chemical.shifts.OUTCAR")
        outcar = Outcar(filename)
        unsym_tensors = outcar.data["unsym_cs_tensor"]
        self.assertEqual(unsym_tensors[0],
                         [[-145.814605, -4.263425, 0.000301],
                          [4.263434, -145.812238, -8.7e-05],
                          [0.000136, -0.000189, -142.794068]])
        self.assertEqual(unsym_tensors[29],
                         [[287.789318, -53.799325, 30.900024],
                          [-53.799571, 225.668117, -17.839598],
                          [3.801103, -2.195218, 88.896756]])

    def test_cs_g0_contribution(self):
        """G=0 contribution to the chemical shielding tensor."""
        filename = os.path.join(test_dir, "nmr", "cs", "core.diff",
                                "core.diff.chemical.shifts.OUTCAR")
        outcar = Outcar(filename)
        g0_contrib = outcar.data["cs_g0_contribution"]
        self.assertEqual(g0_contrib,
                         [[-8.773535, 9e-06, 1e-06],
                          [1.7e-05, -8.773536, -0.0792],
                          [-6e-06, -0.008328, -9.320237]])

    def test_cs_core_contribution(self):
        """Per-species core contribution to the chemical shielding."""
        filename = os.path.join(test_dir, "nmr", "cs", "core.diff",
                                "core.diff.chemical.shifts.OUTCAR")
        outcar = Outcar(filename)
        core_contrib = outcar.data["cs_core_contribution"]
        self.assertEqual(core_contrib,
                         {'Mg': -412.8248405,
                          'C': -200.5098812,
                          'O': -271.0766979})

    def test_nmr_efg(self):
        """Electric field gradient: Cq/eta per site plus raw unsymmetrized tensors."""
        filename = os.path.join(test_dir, "nmr", "efg", "AlPO4", "OUTCAR")
        outcar = Outcar(filename)
        expected_efg = [
            {'eta': 0.465, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.573},
            {'eta': 0.465, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.573},
            {'eta': 0.137, 'nuclear_quadrupole_moment': 146.6, 'cq': 6.327},
            {'eta': 0.137, 'nuclear_quadrupole_moment': 146.6, 'cq': 6.327},
            {'eta': 0.112, 'nuclear_quadrupole_moment': 146.6, 'cq': -7.453},
            {'eta': 0.112, 'nuclear_quadrupole_moment': 146.6, 'cq': -7.453},
            {'eta': 0.42, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.58},
            {'eta': 0.42, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.58}]
        self.assertEqual(len(outcar.data["efg"][2:10]), len(expected_efg))
        for e1, e2 in zip(outcar.data["efg"][2:10], expected_efg):
            for k in e1.keys():
                self.assertAlmostEqual(e1[k], e2[k], places=5)
        # NOTE(review): "exepected_tensors" is a misspelling of "expected_tensors".
        exepected_tensors = [[[11.11, 1.371, 2.652], [1.371, 3.635, -3.572], [2.652, -3.572, -14.746]],
                             [[11.11, -1.371, 2.652], [-1.371, 3.635, 3.572], [2.652, 3.572, -14.746]],
                             [[-3.098, 6.511, 7.732], [6.511, 1.419, 11.445], [7.732, 11.445, 1.678]],
                             [[-3.098, -6.511, 7.732], [-6.511, 1.419, -11.445], [7.732, -11.445, 1.678]],
                             [[2.344, -10.775, -7.006], [-10.775, -7.152, -11.309], [-7.006, -11.309, 4.808]],
                             [[2.344, 10.775, -7.006], [10.775, -7.152, 11.309], [-7.006, 11.309, 4.808]],
                             [[2.404, -0.588, -6.83], [-0.588, 10.435, 3.159], [-6.83, 3.159, -12.839]],
                             [[2.404, 0.588, -6.83], [0.588, 10.435, -3.159], [-6.83, -3.159, -12.839]]]
        self.assertEqual(len(outcar.data["unsym_efg_tensor"][2:10]), len(exepected_tensors))
        for e1, e2 in zip(outcar.data["unsym_efg_tensor"][2:10], exepected_tensors):
            self.assertArrayAlmostEqual(e1, e2)

    def test_read_fermi_contact_shift(self):
        """Fermi contact shift tables (fch/th/dh) parsed into outcar.data."""
        filepath = os.path.join(test_dir, "OUTCAR_fc")
        outcar = Outcar(filepath)
        outcar.read_fermi_contact_shift()
        self.assertAlmostEqual(outcar.data["fermi_contact_shift"][u'fch'][0][0],
                               -0.002)
        self.assertAlmostEqual(outcar.data["fermi_contact_shift"][u'th'][0][0],
                               -0.052)
        self.assertAlmostEqual(outcar.data["fermi_contact_shift"][u'dh'][0][0],
                               0.0)

    def test_drift(self):
        """Drift forces: one entry per ionic step; sums checked per file."""
        outcar = Outcar(os.path.join(test_dir, "OUTCAR"))
        self.assertEqual(len(outcar.drift), 5)
        self.assertAlmostEqual(np.sum(outcar.drift), 0)
        outcar = Outcar(os.path.join(test_dir, "OUTCAR.CL"))
        self.assertEqual(len(outcar.drift), 79)
        self.assertAlmostEqual(np.sum(outcar.drift), 0.448010)

    def test_electrostatic_potential(self):
        """Electrostatic potential at atomic sites plus sampling radii and FFT grid."""
        outcar = Outcar(os.path.join(test_dir, "OUTCAR"))
        self.assertEqual(outcar.ngf, [54, 30, 54])
        self.assertTrue(
            np.allclose(outcar.sampling_radii, [0.9748, 0.9791, 0.7215]))
        self.assertTrue(np.allclose(outcar.electrostatic_potential,
                                    [-26.0704, -45.5046, -45.5046, -72.9539,
                                     -73.0621, -72.9539, -73.0621]))
class BSVasprunTest(unittest.TestCase):
    """Tests for BSVasprun, the lightweight band-structure-only vasprun parser."""

    def test_get_band_structure(self):
        """BSVasprun must yield the same CBM/VBM data as the full Vasprun parser."""
        filepath = os.path.join(test_dir, 'vasprun_Si_bands.xml')
        vasprun = BSVasprun(filepath, parse_potcar_file=False)
        bs = vasprun.get_band_structure(kpoints_filename=os.path.join(test_dir,
                                                                      'KPOINTS_Si_bands'))
        cbm = bs.get_cbm()
        vbm = bs.get_vbm()
        self.assertEqual(cbm['kpoint_index'], [13], "wrong cbm kpoint index")
        self.assertAlmostEqual(cbm['energy'], 6.2301, "wrong cbm energy")
        self.assertEqual(cbm['band_index'], {Spin.up: [4], Spin.down: [4]},
                         "wrong cbm bands")
        self.assertEqual(vbm['kpoint_index'], [0, 63, 64])
        self.assertAlmostEqual(vbm['energy'], 5.6158, "wrong vbm energy")
        self.assertEqual(vbm['band_index'], {Spin.up: [1, 2, 3],
                                             Spin.down: [1, 2, 3]},
                         "wrong vbm bands")
        self.assertEqual(vbm['kpoint'].label, "\\Gamma", "wrong vbm label")
        self.assertEqual(cbm['kpoint'].label, None, "wrong cbm label")
        # Serialization of a BSVasprun must include the eigenvalues.
        d = vasprun.as_dict()
        self.assertIn("eigenvalues", d["output"])
class OszicarTest(unittest.TestCase):
    """Tests for OSZICAR parsing (electronic/ionic step bookkeeping and energies)."""

    def test_init(self):
        osz = Oszicar(os.path.join(test_dir, 'OSZICAR'))
        # One electronic-step list per ionic step.
        self.assertEqual(len(osz.electronic_steps), len(osz.ionic_steps))
        self.assertEqual(len(osz.all_energies), 60)
        self.assertAlmostEqual(osz.final_energy, -526.63928)
class LocpotTest(unittest.TestCase):
    """Tests for LOCPOT parsing (planar-averaged potential and axis grids)."""

    def test_init(self):
        pot = Locpot.from_file(os.path.join(test_dir, 'LOCPOT'))
        self.assertAlmostEqual(sum(pot.get_average_along_axis(0)),
                               -217.05226954)
        # Cubic cell: each axis grid ends at the same lattice length.
        for axis in range(3):
            self.assertAlmostEqual(pot.get_axis_grid(axis)[-1], 2.87629, 2)
class ChgcarTest(PymatgenTest):
    """Tests for CHGCAR parsing: spin densities, file round-trips, SOC data, HDF5."""

    def test_init(self):
        """Integrated charge differences for non-spin, spin, and multi-radius cases."""
        filepath = os.path.join(test_dir, 'CHGCAR.nospin')
        chg = Chgcar.from_file(filepath)
        # Non-spin-polarized run: no spin density, so the difference is zero.
        self.assertAlmostEqual(chg.get_integrated_diff(0, 2)[0, 1], 0)
        filepath = os.path.join(test_dir, 'CHGCAR.spin')
        chg = Chgcar.from_file(filepath)
        self.assertAlmostEqual(chg.get_integrated_diff(0, 1)[0, 1],
                               -0.0043896932237534022)
        # test sum: adding a Chgcar to itself doubles the integrated difference.
        chg += chg
        self.assertAlmostEqual(chg.get_integrated_diff(0, 1)[0, 1],
                               -0.0043896932237534022 * 2)
        filepath = os.path.join(test_dir, 'CHGCAR.Fe3O4')
        chg = Chgcar.from_file(filepath)
        ans = [1.56472768, 3.25985108, 3.49205728, 3.66275028, 3.8045896,
               5.10813352]
        myans = chg.get_integrated_diff(0, 3, 6)
        self.assertTrue(np.allclose(myans[:, 1], ans))

    def test_write(self):
        """Round-trip write: augmentation-occupancy headers appear at known lines."""
        filepath = os.path.join(test_dir, 'CHGCAR.spin')
        chg = Chgcar.from_file(filepath)
        chg.write_file("CHGCAR_pmg")
        with open("CHGCAR_pmg") as f:
            for i, line in enumerate(f):
                if i == 22130:
                    self.assertEqual("augmentation occupancies 1 15\n", line)
                if i == 44255:
                    self.assertEqual("augmentation occupancies 1 15\n", line)
        os.remove("CHGCAR_pmg")

    def test_soc_chgcar(self):
        """Non-collinear CHGCAR: diff_x/y/z components and their vector magnitude."""
        filepath = os.path.join(test_dir, "CHGCAR.NiO_SOC.gz")
        chg = Chgcar.from_file(filepath)
        self.assertEqual(set(chg.data.keys()),
                         {'total', 'diff_x', 'diff_y', 'diff_z', 'diff'})
        self.assertTrue(chg.is_soc)
        self.assertEqual(chg.data['diff'].shape, chg.data['diff_y'].shape)
        # check our construction of chg.data['diff'] makes sense
        # this has been checked visually too and seems reasonable
        self.assertEqual(abs(chg.data['diff'][0][0][0]),
                         np.linalg.norm([chg.data['diff_x'][0][0][0],
                                         chg.data['diff_y'][0][0][0],
                                         chg.data['diff_z'][0][0][0]]))
        # and that the net magnetization is about zero
        # note: we get ~ 0.08 here, seems a little high compared to
        # vasp output, but might be due to chgcar limitations?
        self.assertAlmostEqual(chg.net_magnetization, 0.0, places=0)
        chg.write_file("CHGCAR_pmg_soc")
        chg_from_file = Chgcar.from_file("CHGCAR_pmg_soc")
        self.assertTrue(chg_from_file.is_soc)
        os.remove("CHGCAR_pmg_soc")

    def test_hdf5(self):
        """HDF5 round-trip: data, lattice, coords, and species metadata all survive."""
        chgcar = Chgcar.from_file(os.path.join(test_dir, "CHGCAR.NiO_SOC.gz"))
        chgcar.to_hdf5("chgcar_test.hdf5")
        import h5py
        with h5py.File("chgcar_test.hdf5", "r") as f:
            self.assertArrayAlmostEqual(np.array(f["vdata"]["total"]),
                                        chgcar.data["total"])
            self.assertArrayAlmostEqual(np.array(f["vdata"]["diff"]),
                                        chgcar.data["diff"])
            self.assertArrayAlmostEqual(np.array(f["lattice"]),
                                        chgcar.structure.lattice.matrix)
            self.assertArrayAlmostEqual(np.array(f["fcoords"]),
                                        chgcar.structure.frac_coords)
            for z in f["Z"]:
                self.assertIn(z, [Element.Ni.Z, Element.O.Z])
            for sp in f["species"]:
                self.assertIn(sp, ["Ni", "O"])
        chgcar2 = Chgcar.from_hdf5("chgcar_test.hdf5")
        self.assertArrayAlmostEqual(chgcar2.data["total"],
                                    chgcar.data["total"])
        os.remove("chgcar_test.hdf5")
class ProcarTest(unittest.TestCase):
    """Tests for PROCAR parsing: occupations, element projections, phase factors."""

    def test_init(self):
        """Orbital occupations and per-element projections for simple and full PROCARs."""
        filepath = os.path.join(test_dir, 'PROCAR.simple')
        p = Procar(filepath)
        self.assertAlmostEqual(p.get_occupation(0, 'd')[Spin.up], 0)
        self.assertAlmostEqual(p.get_occupation(0, 's')[Spin.up],
                               0.35381249999999997)
        self.assertAlmostEqual(p.get_occupation(0, 'p')[Spin.up], 1.19540625)
        # 'm' is not a valid orbital label.
        self.assertRaises(ValueError, p.get_occupation, 1, 'm')
        self.assertEqual(p.nbands, 10)
        self.assertEqual(p.nkpoints, 10)
        self.assertEqual(p.nions, 3)
        # Projection onto elements requires a matching Structure.
        lat = Lattice.cubic(3.)
        s = Structure(lat, ["Li", "Na", "K"], [[0., 0., 0.],
                                               [0.25, 0.25, 0.25],
                                               [0.75, 0.75, 0.75]])
        d = p.get_projection_on_elements(s)
        self.assertAlmostEqual(d[Spin.up][2][2],
                               {'Na': 0.042, 'K': 0.646, 'Li': 0.042})
        # Spin-polarized PROCAR with individual d orbitals.
        filepath = os.path.join(test_dir, 'PROCAR')
        p = Procar(filepath)
        self.assertAlmostEqual(p.get_occupation(0, 'dxy')[Spin.up],
                               0.96214813853000025)
        self.assertAlmostEqual(p.get_occupation(0, 'dxy')[Spin.down],
                               0.85796295426000124)

    def test_phase_factors(self):
        """Complex phase factors from a LORBIT=12-style PROCAR."""
        filepath = os.path.join(test_dir, 'PROCAR.phase')
        p = Procar(filepath)
        self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 0, 0],
                               -0.746 + 0.099j)
        self.assertAlmostEqual(p.phase_factors[Spin.down][0, 0, 0, 0],
                               0.372 - 0.654j)
        # Two Li should have same phase factor.
        self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 0, 0],
                               p.phase_factors[Spin.up][0, 0, 1, 0])
        self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 2, 0],
                               -0.053 + 0.007j)
        self.assertAlmostEqual(p.phase_factors[Spin.down][0, 0, 2, 0],
                               0.027 - 0.047j)
class XdatcarTest(unittest.TestCase):
    """Tests for XDATCAR trajectory parsing and concatenation."""

    def test_init(self):
        # Both fixture formats (VASP 4 and VASP 5 style) hold the same 4-frame
        # Li2O trajectory.
        traj4 = Xdatcar(os.path.join(test_dir, 'XDATCAR_4'))
        self.assertEqual(len(traj4.structures), 4)
        for frame in traj4.structures:
            self.assertEqual(frame.formula, "Li2 O1")
        traj5 = Xdatcar(os.path.join(test_dir, 'XDATCAR_5'))
        self.assertEqual(len(traj5.structures), 4)
        for frame in traj5.structures:
            self.assertEqual(frame.formula, "Li2 O1")
        # Concatenating the 4-frame file doubles the trajectory length.
        traj5.concatenate(os.path.join(test_dir, 'XDATCAR_4'))
        self.assertEqual(len(traj5.structures), 8)
        self.assertIsNotNone(traj5.get_string())
class DynmatTest(unittest.TestCase):
    """Tests for DYNMAT parsing (species/atom/displacement counts and matrix rows)."""

    def test_init(self):
        # nosetests pymatgen/io/vasp/tests/test_outputs.py:DynmatTest.test_init
        dyn = Dynmat(os.path.join(test_dir, 'DYNMAT'))
        self.assertEqual(dyn.nspecs, 2)
        self.assertEqual(dyn.natoms, 6)
        self.assertEqual(dyn.ndisps, 3)
        self.assertTrue(np.allclose(dyn.masses, [63.546, 196.966]))
        # Atom 4, displacement 2 must be present with the expected vectors.
        self.assertIn(4, dyn.data)
        self.assertIn(2, dyn.data[4])
        entry = dyn.data[4][2]
        self.assertTrue(np.allclose(entry['dispvec'], [0., 0.05, 0.]))
        self.assertTrue(np.allclose(entry['dynmat'][3],
                                    [0.055046, -0.298080, 0.]))
        # TODO: test get_phonon_frequencies once cross-checked
class WavecarTest(unittest.TestCase):
    """Tests for WAVECAR parsing: header metadata, G-point generation,
    wavefunction evaluation, and FFT mesh construction."""

    def setUp(self):
        self.w = Wavecar(os.path.join(test_dir, 'WAVECAR.N2'))
        # 10 Angstrom cubic cell and its reciprocal lattice, built by hand so
        # the parsed values can be checked against an independent construction.
        self.a = np.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
                           [0.0, 0.0, 10.0]])
        self.vol = np.dot(self.a[0, :], np.cross(self.a[1, :], self.a[2, :]))
        self.b = np.array([np.cross(self.a[1, :], self.a[2, :]),
                           np.cross(self.a[2, :], self.a[0, :]),
                           np.cross(self.a[0, :], self.a[1, :])])
        self.b = 2 * np.pi * self.b / self.vol

    def test_init(self):
        """Header fields, coefficient shapes, error paths, and verbose output."""
        self.assertEqual(self.w.filename, os.path.join(test_dir, 'WAVECAR.N2'))
        self.assertAlmostEqual(self.w.efermi, -5.7232, places=4)
        self.assertEqual(self.w.encut, 25)
        self.assertEqual(self.w.nb, 9)
        self.assertEqual(self.w.nk, 1)
        self.assertTrue(np.allclose(self.w.a, self.a))
        self.assertTrue(np.allclose(self.w.b, self.b))
        self.assertAlmostEqual(self.w.vol, self.vol)
        self.assertEqual(len(self.w.kpoints), self.w.nk)
        self.assertEqual(len(self.w.coeffs), self.w.nk)
        self.assertEqual(len(self.w.coeffs[0]), self.w.nb)
        self.assertEqual(len(self.w.band_energy), self.w.nk)
        self.assertEqual(self.w.band_energy[0].shape, (self.w.nb, 3))
        self.assertLessEqual(len(self.w.Gpoints[0]), 257)
        for k in range(self.w.nk):
            for b in range(self.w.nb):
                self.assertEqual(len(self.w.coeffs[k][b]),
                                 len(self.w.Gpoints[k]))
        # A truncated/corrupt WAVECAR must be rejected.
        with self.assertRaises(ValueError):
            Wavecar(os.path.join(test_dir, 'WAVECAR.N2.malformed'))
        # verbose=True should print progress information to stdout.
        import sys
        from io import StringIO
        saved_stdout = sys.stdout
        try:
            out = StringIO()
            sys.stdout = out
            Wavecar(os.path.join(test_dir, 'WAVECAR.N2'), verbose=True)
            self.assertNotEqual(out.getvalue().strip(), '')
        finally:
            sys.stdout = saved_stdout
        # Same file written with record tag 45210 (double precision).
        self.w = Wavecar(os.path.join(test_dir, 'WAVECAR.N2.45210'))
        self.assertEqual(self.w.filename, os.path.join(test_dir,
                                                       'WAVECAR.N2.45210'))
        self.assertAlmostEqual(self.w.efermi, -5.7232, places=4)
        self.assertEqual(self.w.encut, 25)
        self.assertEqual(self.w.nb, 9)
        self.assertEqual(self.w.nk, 1)
        self.assertTrue(np.allclose(self.w.a, self.a))
        self.assertTrue(np.allclose(self.w.b, self.b))
        self.assertAlmostEqual(self.w.vol, self.vol)
        self.assertEqual(len(self.w.kpoints), self.w.nk)
        self.assertEqual(len(self.w.coeffs), self.w.nk)
        self.assertEqual(len(self.w.coeffs[0]), self.w.nb)
        self.assertEqual(len(self.w.band_energy), self.w.nk)
        self.assertEqual(self.w.band_energy[0].shape, (self.w.nb, 3))
        self.assertLessEqual(len(self.w.Gpoints[0]), 257)
        # Spin-polarized WAVECARs are not supported by this reader.
        with self.assertRaises(ValueError):
            Wavecar(os.path.join(test_dir, 'WAVECAR.N2.spin'))
        # If G-point generation yields nothing, construction must fail; patch
        # the method temporarily and restore it even if the assertion throws.
        temp_ggp = Wavecar._generate_G_points
        try:
            Wavecar._generate_G_points = lambda x, y: []
            with self.assertRaises(ValueError):
                Wavecar(os.path.join(test_dir, 'WAVECAR.N2'))
        finally:
            Wavecar._generate_G_points = temp_ggp

    def test__generate_nbmax(self):
        """Maximum G-vector index along each reciprocal axis."""
        self.w._generate_nbmax()
        self.assertEqual(self.w._nbmax.tolist(), [5, 5, 5])

    def test__generate_G_points(self):
        """At most 257 G-points fall inside the cutoff sphere for each k-point."""
        for k in range(self.w.nk):
            kp = self.w.kpoints[k]
            self.assertLessEqual(len(self.w._generate_G_points(kp)), 257)

    def test_evaluate_wavefunc(self):
        """Real-space wavefunction value: single-coefficient case and full band."""
        # Append a fake single-coefficient band so the normalization
        # (1/sqrt(V)) can be checked in closed form.
        self.w.Gpoints.append(np.array([0, 0, 0]))
        self.w.kpoints.append(np.array([0, 0, 0]))
        self.w.coeffs.append([[1 + 1j]])
        self.assertAlmostEqual(self.w.evaluate_wavefunc(-1, -1, [0, 0, 0]),
                               (1 + 1j) / np.sqrt(self.vol), places=4)
        # At r = 0 every plane wave is 1, so the value is the coefficient sum.
        self.assertAlmostEqual(self.w.evaluate_wavefunc(0, 0, [0, 0, 0]),
                               np.sum(self.w.coeffs[0][0]) / np.sqrt(self.vol),
                               places=4)

    def test_fft_mesh(self):
        """FFT mesh placement with and without the fftshift-style reordering."""
        mesh = self.w.fft_mesh(0, 5)
        ind = np.argmax(np.abs(mesh))
        self.assertEqual(np.unravel_index(ind, mesh.shape), (14, 1, 1))
        # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement and truncates the same way.
        self.assertEqual(mesh[tuple((self.w.ng / 2).astype(int))], 0j)
        mesh = self.w.fft_mesh(0, 5, shift=False)
        ind = np.argmax(np.abs(mesh))
        self.assertEqual(np.unravel_index(ind, mesh.shape), (6, 8, 8))
        self.assertEqual(mesh[0, 0, 0], 0j)
# Run the full test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| gpetretto/pymatgen | pymatgen/io/vasp/tests/test_outputs.py | Python | mit | 56,558 | [
"VASP",
"pymatgen"
] | f1f862516992754fcff07d10a7d527782613a30c7e0d3a741f71c84473691b77 |
#!/bin/env python
#-------------------------------------------------------------------------------
#
# Filename : all_trunc_gaus.py
# Description : Matplotlib file for generating allthe various Truncated
# Gaussians determined describe in the article
# Author : Yi-Mu "Enoch" Chen [ ensc@hep1.phys.ntu.edu.tw ]
#
#
#-------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as spc
from scipy.integrate import quad
from scipy.optimize import ridder
from scipy.optimize import root
from scipy.optimize import minimize_scalar
def gaussian(x, mu, sigma):
    """Normal probability density with mean mu and standard deviation sigma."""
    norm = sigma * np.sqrt(2 * np.pi)
    return np.exp(-((x - mu) ** 2) / (2 * sigma ** 2)) / norm
def gaussian_cdf(x, mu, sigma):
    """Cumulative distribution function of the normal distribution.

    Bug fix: the original used ``1/2 * (...)``.  Under the Python 2
    interpreter named in the shebang, 1/2 is integer division and equals 0,
    which would make this function return 0 everywhere and cause a
    division by zero in TrucGaus().  Use 0.5 so the code is correct under
    both Python 2 and 3.
    """
    return 0.5 * (1 + spc.erf((x - mu) / (np.sqrt(2) * sigma)))
def TrucGaus(x, mu, sigma):
    """Gaussian density truncated (renormalised) to the interval [0, 100]."""
    mass_in_range = gaussian_cdf(100, mu, sigma) - gaussian_cdf(0, mu, sigma)
    return gaussian(x, mu, sigma) / mass_in_range
def expval(mu, sigma):
    """Mean of the [0, 100]-truncated Gaussian with parameters mu, sigma."""
    def integrand(x, m, s):
        return x * TrucGaus(x, m, s)
    value, _err = quad(integrand, 0, 100, args=(mu, sigma))
    return value
def varval(mu, sigma):
    """Standard deviation of the [0, 100]-truncated Gaussian.

    NOTE(review): despite the name this returns the square root of the
    variance (a standard deviation), matching how callers compare it
    against sigma-like targets.
    """
    def integrand(x, m, s):
        mean = expval(m, s)
        return (x - mean) ** 2 * TrucGaus(x, m, s)
    variance, _err = quad(integrand, 0, 100, args=(mu, sigma))
    return np.sqrt(variance)
# Evaluation grid covering the full score range [0, 100].
x = np.linspace(0,100,101,endpoint=True)
# Root-finding helpers: each returns zero when the truncated distribution
# attains the target moments (mean 80, standard deviation 20).
def fsolve1(x):
    # sigma-only adjustment (mu fixed at 80)
    return varval(80,x) - 20
def fsolve2(x):
    # mu-only adjustment (sigma fixed at 20)
    return expval(x,20) - 80
def fsolve3(x):
    # simultaneous adjustment of both parameters
    mu,sigma = x[0], x[1]
    return [expval(mu,sigma)-80,varval(mu,sigma)-20]
# Four parameterisations: the naive (mu=80, sigma=20) case, then each
# parameter re-solved so the *truncated* moments match the targets.
mu = [0,0,0,0]
sigma = [0,0,0,0]
mu[0],sigma[0] = 80,20
mu[1],sigma[1] = 80,ridder(fsolve1,10,30)
mu[2],sigma[2] = ridder(fsolve2,60,95),20
solved = root(fsolve3,[100,50],tol=10**-8).x
mu[3],sigma[3] = solved[0],solved[1]
def ratio(mu,sigma):
    # Percentage of probability mass below a score of 65.
    return 100*quad(TrucGaus,0,65,args=(mu,sigma))[0]
# Plot all four truncated Gaussians with their realised moments in the legend.
for i in range(0,4):
    plt.plot( x,
        TrucGaus(x,mu[i],sigma[i]),
        label=r"$G_{{T,{}}}:\mu={:.1f}, \sigma={:.1f}, e={:.1f}, \sqrt{{v}}={:.1f}$, Below 65pts={:.1f}%".format(
            i,
            mu[i],sigma[i],
            expval(mu[i],sigma[i]),
            varval(mu[i],sigma[i]),
            ratio(mu[i],sigma[i])
            )
        )
plt.xlabel("Score")
plt.ylabel("Probability density")
plt.xlim(0,100)
plt.ylim(ymin=0)
plt.legend()
plt.tight_layout()
plt.savefig('all_trunc_gauss.png')
# Pairwise Kolmogorov-Smirnov-style comparison: find the score where the
# two CDFs differ most, then report the KS significance-derived quantity.
for i in range(0,4):
    for j in range(i+1,4):
        def dist(x):
            # negative squared CDF difference, so that minimising it
            # locates the maximum distance
            firstcdf = quad(TrucGaus,0,x,args=(mu[i],sigma[i]))[0]
            secondcdf = quad(TrucGaus,0,x,args=(mu[j],sigma[j]))[0]
            return -(firstcdf-secondcdf)**2
        maxx = minimize_scalar( dist, [0,100], method='bounded',bounds=[0,100]).x
        distmax = np.sqrt(-dist(maxx))
        print(i,j,maxx,(spc.kolmogorov(distmax)/distmax)**2)
| enochnotsocool/enochnotsocool.github.io | images/genimage/stat_infer/all_trunc_gaus.py | Python | mit | 2,776 | [
"Gaussian"
] | 2c4f8c7d8adbbd8fe7ed394252256f5a9b926c76f6d6c6341a10e9e878dad1da |
#!/usr/bin/env python
# Created: Wed May 29 08:07:18 2002
# thomas@cbs.dtu.dk, Cecilia.Alsmark@ebc.uu.se
# Copyright 2001 by Thomas Sicheritz-Ponten and Cecilia Alsmark.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Miscellaneous functions for dealing with sequences."""
import re, time
from Bio import SeqIO
from Bio.Seq import Seq
from Bio import Alphabet
from Bio.Alphabet import IUPAC
from Bio.Data import IUPACData, CodonTable
######################################
# DNA
######################
# {{{
def GC(seq):
    """Calculates G+C content, returns the percentage (float between 0 and 100).

    Copes mixed case sequences, and with the ambiguous nucleotide S (G or C)
    when counting the G and C content.  The percentage is calculated against
    the full length, e.g.:

    >>> from Bio.SeqUtils import GC
    >>> GC("ACTGN")
    40.0

    Note that this will return zero for an empty sequence.
    """
    length = len(seq)
    if not length:
        return 0.0
    gc_like = frozenset('GCSgcs')
    gc_count = sum(1 for base in seq if base in gc_like)
    return gc_count * 100.0 / length
def GC123(seq):
    """Calculates total G+C content plus first, second and third positions.

    Returns a tuple of four floats (percentages between 0 and 100) for the
    entire sequence, and the three codon positions. e.g.

    >>> from Bio.SeqUtils import GC123
    >>> GC123("ACTGTN")
    (40.0, 50.0, 50.0, 0.0)

    Copes with mixed case sequences, but does NOT deal with ambiguous
    nucleotides.  An empty sequence returns all zeros instead of raising
    ZeroDivisionError.
    """
    d = {}
    for nt in ['A', 'T', 'G', 'C']:
        d[nt] = [0, 0, 0]
    for i in range(0, len(seq), 3):
        codon = seq[i:i + 3]
        if len(codon) < 3:
            # Bug fix: the original padded with a single space, so a
            # trailing codon of length 1 stayed length 2 and codon[2]
            # raised IndexError below.  Pad to exactly three characters.
            codon += ' ' * (3 - len(codon))
        for pos in range(0, 3):
            for nt in ['A', 'T', 'G', 'C']:
                # count both upper and lower case occurrences
                if codon[pos] == nt or codon[pos] == nt.lower():
                    d[nt][pos] += 1

    gc = {}
    gcall = 0
    nall = 0
    for i in range(0, 3):
        n = d['G'][i] + d['C'][i] + d['T'][i] + d['A'][i]
        # Bug fix: replaced a bare "except:" with an explicit guard against
        # an empty position count (the only failure the except could mask).
        if n:
            gc[i] = (d['G'][i] + d['C'][i]) * 100.0 / n
        else:
            gc[i] = 0
        gcall = gcall + d['G'][i] + d['C'][i]
        nall = nall + n

    if nall:
        gcall = 100.0 * gcall / nall
    else:
        # empty (or all-ambiguous) input: report zero overall GC
        gcall = 0.0
    return gcall, gc[0], gc[1], gc[2]
def GC_skew(seq, window = 100):
    """Calculates GC skew (G-C)/(G+C) for multuple windows along the sequence.

    Returns a list of ratios (floats), controlled by the length of the sequence
    and the size of the window.

    Counts both upper- and lower-case G/C; does NOT look at any ambiguous
    nucleotides.
    """
    skews = []
    for offset in range(0, len(seq), window):
        chunk = seq[offset:offset + window]
        n_g = chunk.count('G') + chunk.count('g')
        n_c = chunk.count('C') + chunk.count('c')
        skews.append((n_g - n_c) / float(n_g + n_c))
    return skews
from math import pi, sin, cos, log
def xGC_skew(seq, window = 1000, zoom = 100,
                         r = 300, px = 100, py = 100):
    """Calculates and plots normal and accumulated GC skew (GRAPHICS !!!).

    Draws both skews radially around a circle on a scrollable Tkinter
    canvas: blue strokes are per-window GC skew, magenta strokes are the
    running (accumulated) skew on an inner circle.  Requires a display;
    returns nothing.

    seq    : sequence string to analyse
    window : window size (nt) handed to GC_skew()
    zoom   : radial scale factor for the per-window skew strokes
    r      : circle radius in pixels; px/py pad the circle inside the canvas
    """
    # Python 2 module name ("Tkinter"); imported lazily so the rest of the
    # module can be used without a GUI toolkit.
    from Tkinter import Scrollbar, Canvas, BOTTOM, BOTH, ALL, \
                        VERTICAL, HORIZONTAL, RIGHT, LEFT, X, Y
    yscroll = Scrollbar(orient = VERTICAL)
    xscroll = Scrollbar(orient = HORIZONTAL)
    canvas = Canvas(yscrollcommand = yscroll.set,
                    xscrollcommand = xscroll.set, background = 'white')
    win = canvas.winfo_toplevel()
    win.geometry('700x700')
    yscroll.config(command = canvas.yview)
    xscroll.config(command = canvas.xview)
    yscroll.pack(side = RIGHT, fill = Y)
    xscroll.pack(side = BOTTOM, fill = X)
    canvas.pack(fill=BOTH, side = LEFT, expand = 1)
    canvas.update()
    # Circle centre (X0, Y0) and its bounding box.
    X0, Y0 = r + px, r + py
    x1, x2, y1, y2 = X0 - r, X0 + r, Y0 -r, Y0 + r
    # Legend text, stacked downward from the centre.
    ty = Y0
    canvas.create_text(X0, ty, text = '%s...%s (%d nt)' % (seq[:7], seq[-7:], len(seq)))
    ty +=20
    canvas.create_text(X0, ty, text = 'GC %3.2f%%' % (GC(seq)))
    ty +=20
    canvas.create_text(X0, ty, text = 'GC Skew', fill = 'blue')
    ty +=20
    canvas.create_text(X0, ty, text = 'Accumulated GC Skew', fill = 'magenta')
    ty +=20
    canvas.create_oval(x1,y1, x2, y2)

    acc = 0
    start = 0
    for gc in GC_skew(seq, window):
        r1 = r
        acc+=gc
        # GC skew: angle around the circle is proportional to the window's
        # position in the sequence; stroke length encodes the skew value.
        alpha = pi - (2*pi*start)/len(seq)
        r2 = r1 - gc*zoom
        x1 = X0 + r1 * sin(alpha)
        y1 = Y0 + r1 * cos(alpha)
        x2 = X0 + r2 * sin(alpha)
        y2 = Y0 + r2 * cos(alpha)
        canvas.create_line(x1,y1,x2,y2, fill = 'blue')
        # accumulated GC skew, drawn on an inner circle (radius r - 50)
        r1 = r - 50
        r2 = r1 - acc
        x1 = X0 + r1 * sin(alpha)
        y1 = Y0 + r1 * cos(alpha)
        x2 = X0 + r2 * sin(alpha)
        y2 = Y0 + r2 * cos(alpha)
        canvas.create_line(x1,y1,x2,y2, fill = 'magenta')
        canvas.update()
        start += window

    canvas.configure(scrollregion = canvas.bbox(ALL))
def molecular_weight(seq):
    """Calculate the molecular weight of a DNA sequence."""
    # Plain strings are promoted to Seq objects first.
    if isinstance(seq, str):
        seq = Seq(seq, IUPAC.unambiguous_dna)
    weights = IUPACData.unambiguous_dna_weights
    total = 0
    for base in seq:
        total += weights[base]
    return total
def nt_search(seq, subseq):
    """Search for a DNA subseq in sequence.

    use ambiguous values (like N = A or T or C or G, R = A or G etc.)
    searches only on forward strand

    Returns a list whose first element is the expanded regular-expression
    pattern, followed by the 0-based start position of every (possibly
    overlapping) match.
    """
    # Expand each ambiguous nucleotide into a regex character class.
    pattern = ''
    for nt in subseq:
        value = IUPACData.ambiguous_dna_values[nt]
        if len(value) == 1:
            pattern += value
        else:
            pattern += '[%s]' % value
    pos = -1
    result = [pattern]
    # Advance one position past each hit so overlapping matches are found.
    # (Removed an unused local variable that cached len(seq).)
    while True:
        pos += 1
        s = seq[pos:]
        m = re.search(pattern, s)
        if not m:
            break
        pos += int(m.start(0))
        result.append(pos)
    return result
# }}}
######################################
# Protein
######################
# {{{
def seq3(seq):
    """Turn a one letter code protein sequence into one with three letter codes.

    The single input argument 'seq' should be a protein sequence using single
    letter codes, either as a python string or as a Seq or MutableSeq object.

    This function returns the amino acid sequence as a string using the three
    letter amino acid codes. Output follows the IUPAC standard (including
    ambiguous characters B for "Asx", J for "Xle" and X for "Xaa", and also U
    for "Sel" and O for "Pyl") plus "Ter" for a terminator given as an asterisk.

    Any unknown character (including possible gap characters), is changed into
    'Xaa'.

    e.g.

    >>> from Bio.SeqUtils import seq3
    >>> seq3("MAIVMGRWKGAR*")
    'MetAlaIleValMetGlyArgTrpLysGlyAlaArgTer'

    This function was inspired by BioPerl's seq3.
    """
    threecode = {'A':'Ala', 'B':'Asx', 'C':'Cys', 'D':'Asp',
                 'E':'Glu', 'F':'Phe', 'G':'Gly', 'H':'His',
                 'I':'Ile', 'K':'Lys', 'L':'Leu', 'M':'Met',
                 'N':'Asn', 'P':'Pro', 'Q':'Gln', 'R':'Arg',
                 'S':'Ser', 'T':'Thr', 'V':'Val', 'W':'Trp',
                 'Y':'Tyr', 'Z':'Glx', 'X':'Xaa', '*':'Ter',
                 'U':'Sel', 'O':'Pyl', 'J':'Xle',
                 }
    # Anything not in the table (gaps, unknowns) falls back to 'Xaa'.
    pieces = []
    for aa in seq:
        pieces.append(threecode.get(aa, 'Xaa'))
    return ''.join(pieces)
# }}}
######################################
# Mixed ???
######################
# {{{
def six_frame_translations(seq, genetic_code = 1):
    """Formatted string showing the 6 frame translations and GC content.

    nice looking 6 frame translation with GC content - code from xbbtools
    similar to DNA Striders six-frame translation

    e.g.
    from Bio.SeqUtils import six_frame_translations
    print six_frame_translations("AUGGCCAUUGUAAUGGGCCGCUGA")
    """
    from Bio.Seq import reverse_complement, translate
    anti = reverse_complement(seq)
    comp = anti[::-1]
    length = len(seq)
    frames = {}
    for i in range(0,3):
        frames[i+1] = translate(seq[i:], genetic_code)
        # Bug fix: the original called an undefined function reverse();
        # only reverse_complement and translate are imported above, so this
        # raised NameError.  The reverse-strand translation is simply
        # reversed with a slice so it lines up with the forward display.
        frames[-(i+1)] = translate(anti[i:], genetic_code)[::-1]

    # create header
    if length > 20:
        short = '%s ... %s' % (seq[:10], seq[-10:])
    else:
        short = seq
    #TODO? Remove the date as this would spoil any unit test...
    date = time.strftime('%y %b %d, %X', time.localtime(time.time()))
    header = 'GC_Frame: %s, ' % date
    for nt in ['a','t','g','c']:
        header += '%s:%d ' % (nt, seq.count(nt.upper()))
    header += '\nSequence: %s, %d nt, %0.2f %%GC\n\n\n' % (short.lower(),length, GC(seq))
    res = header

    # 60-nucleotide rows: three forward frames above the sequence, the
    # complement strand below it, then the three reverse frames.
    # (Python 2 idioms preserved: map(None, s) spells out characters and
    # i/3 is integer division under the interpreter this file targets.)
    for i in range(0,length,60):
        subseq = seq[i:i+60]
        csubseq = comp[i:i+60]
        p = i/3
        res = res + '%d/%d\n' % (i+1, i/3+1)
        res = res + '  ' + '  '.join(map(None,frames[3][p:p+20])) + '\n'
        res = res + ' ' + '  '.join(map(None,frames[2][p:p+20])) + '\n'
        res = res + '  '.join(map(None,frames[1][p:p+20])) + '\n'
        # seq
        res = res + subseq.lower() + '%5d %%\n' % int(GC(subseq))
        res = res + csubseq.lower() + '\n'
        # - frames
        res = res + '  '.join(map(None,frames[-2][p:p+20]))  +' \n'
        res = res + ' ' + '  '.join(map(None,frames[-1][p:p+20])) + '\n'
        res = res + '  ' + '  '.join(map(None,frames[-3][p:p+20])) + '\n\n'
    return res
# }}}
######################################
# FASTA file utilities
######################
# {{{
def quick_FASTA_reader(file):
    """Simple FASTA reader, returning a list of string tuples.

    The single argument 'file' should be the filename of a FASTA format file.
    This function will open and read in the entire file, constructing a list
    of all the records, each held as a tuple of strings (the sequence name or
    title, and its sequence).

    This function was originally intended for use on large files, where its
    low overhead makes it very fast. However, because it returns the data as
    a single in memory list, this can require a lot of RAM on large files.

    You are generally encouraged to use Bio.SeqIO.parse(handle, "fasta") which
    allows you to iterate over the records one by one (avoiding having all the
    records in memory at once). Using Bio.SeqIO also makes it easy to switch
    between different input file formats. However, please note that rather
    than simple strings, Bio.SeqIO uses SeqRecord objects for each record.
    """
    # Split on "\n>" rather than ">" so stray '>' characters inside a
    # title do not break records; prepending "\n" makes the first record
    # split like all the others.
    handle = open(file)
    try:
        txt = "\n" + handle.read()
    finally:
        handle.close()
    records = []
    for chunk in txt.split('\n>')[1:]:
        title, sequence = chunk.split('\n', 1)
        sequence = sequence.replace('\n', '').replace(' ', '').upper()
        records.append((title, sequence))
    return records
# }}}
def _test():
    """Run the Bio.SeqUtils module's doctests (PRIVATE)."""
    # Python 2 print statements: this file targets the py2 interpreter.
    print "Runing doctests..."
    import doctest
    doctest.testmod()
    print "Done"

# Execute the doctests when run as a script.
if __name__ == "__main__":
    _test()
| bryback/quickseq | genescript/Bio/SeqUtils/__init__.py | Python | mit | 11,238 | [
"BioPerl",
"Biopython"
] | 0bd191912d36d7017a363cbe81926c9b496a634e0a737c7285b2b0fa042b175b |
"""
@name: PyHouse/src/Modules/_test/test_Computer.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2015-2018 by D. Brian Kimmel
@license: MIT License
@note: Created on Jul 29, 2015
@Summary:
"""
__updated__ = '2018-02-12'
from twisted.trial import unittest, reporter, runner
from Modules.Computer import test as I_test
class Z_Suite(unittest.TestCase):
    """Wrapper suite that runs the whole Modules.Computer test package."""

    def setUp(self):
        # Loader kept on the instance, mirroring the module convention.
        self.m_test = runner.TestLoader()

    def test_Computer(self):
        # Load every test in the Computer package, run it with a plain
        # reporter, and print a summary banner.
        suite = runner.TestLoader().loadPackage(I_test)
        result = reporter.Reporter()
        suite.run(result)
        result.done()
        #
        print('\n====================\n*** test_Computer ***\n{}\n'.format(result))
# ## END DBK
| DBrianKimmel/PyHouse | Project/src/Modules/_test/test_Computer.py | Python | mit | 753 | [
"Brian"
] | 606566c65a1b16dbda295e4a45dc9a1c9e5a22197815cb8366276c906dc369a1 |
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
#
# This file is part of the Bacterial and Archaeal Genome Analyser
# Copyright (C) 2015-16 David Williams
# david.williams.at.liv.d-dub.org.uk
# License GPLv3+: GNU GPL version 3 or later
# This is free software: you are free to change and redistribute it
# There is NO WARRANTY, to the extent permitted by law
#
# Work on this software was started at The University of Liverpool, UK
# with funding from The Wellcome Trust (093306/Z/10) awarded to:
# Dr Steve Paterson (The University of Liverpool, UK)
# Dr Craig Winstanley (The University of Liverpool, UK)
# Dr Michael A Brockhurst (The University of York, UK)
#
'''
CallVariants module from the Bacterial and Archaeal Genome Analyzer (BAGA).
This module contains functions to infer single nucleotide polymorphisms,
insertions and deletions from short reads mapped to a reference genome using
the Genome Analysis Toolkit from the Broad Institute.
'''
# stdlib
from time import sleep as _sleep
from cStringIO import StringIO as _StringIO
from random import sample as _sample
from collections import OrderedDict as _OrderedDict
from collections import defaultdict as _defaultdict
from collections import Counter as _Counter
from glob import glob as _glob
from baga import _subprocess
from baga import _os
from baga import _multiprocessing
from baga import _cPickle
from baga import _gzip
from baga import _re
from baga import _tarfile
from baga import _array
from baga import _json
from baga import _time
from baga import _md5
# external Python modules
import pysam as _pysam
from Bio import SeqIO as _SeqIO
from Bio.Seq import Seq as _Seq
from Bio.SeqRecord import SeqRecord as _SeqRecord
from Bio.Data.CodonTable import TranslationError as _TranslationError
# package functions
from baga import decide_max_processes as _decide_max_processes
from baga import get_exe_path as _get_exe_path
from baga import report_time as _report_time
def main():
    # Placeholder entry point; this module is used as a library.
    pass
def parseVCF(path_to_VCF):
    '''returns (header, header_section_order, colnames, variants)'''
    meta_pattern = _re.compile('##([^=]+)=(.+)$')
    section_order = _OrderedDict()
    meta_lines = _defaultdict(list)
    variants = []
    in_body = False
    with open(path_to_VCF) as vcf:
        for line in vcf:
            if in_body:
                # everything after the #CHROM line is a variant record
                variants.append(line.rstrip())
            elif line.startswith('#CHROM'):
                # column-name row: marks the end of the meta-data header
                colnames = line.rstrip().split('\t')
                in_body = True
            else:
                # '##section=value' meta-data row
                section, value = _re.match(meta_pattern, line.rstrip()).groups()
                section_order[section] = True
                meta_lines[section].append(line.rstrip())
    return(dict(meta_lines), list(section_order), colnames, variants)
def dictify_vcf_header(header):
    '''convert VCF header to dict using re for quotes'''
    # Split points are commas/equals signs that are NOT inside a quoted
    # string (single or double quotes), via a lookahead for balanced quotes.
    comma_outside_quotes = _re.compile(''',(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''')
    equals_outside_quotes = _re.compile('''=(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''')
    headerdict = {}
    for section, rows in header.items():
        prefix = '##{}='.format(section)
        for row in rows:
            # only structured entries of the form ##SECTION=<k=v,...> apply
            if row[len(prefix)] != '<':
                continue
            inner = row[len(prefix) + 1:-1]
            pairs = [_re.split(equals_outside_quotes, field)
                     for field in _re.split(comma_outside_quotes, inner)]
            headerdict.setdefault(section, []).append(dict(pairs))
    return(headerdict)
def sortVariantsKeepFilter(header, colnames, variantrows):
    '''
    Given a VCF file contents divided up by CallVariants.parseVCF(), return a
    dict for tabulation of variants in which each is marked with appropriate
    filters as described in the VCF header.

    Per sample filters stored in INFO columns must be described in header
    ##INFO entries and have Descriptions starting "FILTER:".

    Returns (variants, allfilters) where variants maps
    sample -> chromosome -> position -> [[REF, ALT-info], set-of-filter-names]
    and allfilters is the union of FILTER IDs and per-sample INFO filter IDs.
    ALT-info is a single character string, a (char, freq, ploidy) tuple for
    multi-allele genotypes, or '.' when no call could be made.
    '''
    # identify chromosome for this VCF <== this will fail if running against > 1 contigs . . .
    pattern = _re.compile('##contig=<ID=([A-Za-z0-9_\.]+),length=([0-9]+)>')
    # for multiple chromosomes iterate here
    if 'contig' in header:
        genome_ids = []
        genome_lengths = []
        for contig in header['contig']:
            match = _re.match(pattern, contig)
            if match is None:
                raise Exception("Failed to identify which chromosome the variants were called on (couldn't find '##contig=' and/or recognise name)")
            genome_id, genome_length = match.groups()
            genome_lengths += [int(genome_length)]
            genome_ids += [genome_id]
        for genome_length, genome_id in zip(genome_lengths, genome_ids):
            print('Variants were called against {:,} bp genome: {}\n'.format(genome_length, genome_id))
    else:
        print('WARNING: no contig information found in header (reference free?)')

    # fixed VCF columns up to and including FORMAT; the rest are sample names
    parameter_names = colnames[:colnames.index('FORMAT')+1]
    parameter_names[0] = parameter_names[0].lstrip('#')
    sample_names = colnames[colnames.index('FORMAT')+1:]
    headerdict = dictify_vcf_header(header)
    # filter names declared in conventional ##FILTER entries (whole-row filters)
    FILTERfilters = set()
    for FILTER in headerdict['FILTER']:
        FILTERfilters.add(FILTER['ID'].strip('\'"'))
    # per-sample filters smuggled through ##INFO entries whose Description
    # starts with "FILTER:"
    INFOfilters = set()
    for INFO in headerdict['INFO']:
        if INFO['Description'].strip('"')[:len('FILTER:')] == 'FILTER:':
            INFOfilters.add(INFO['ID'].strip('\'"'))
    allfilters = FILTERfilters | INFOfilters
    if len(variantrows) == 0:
        variants = {}
        return(variants, allfilters)

    # sanity-check the first row: genotype data (GT) must be present
    cols = dict(zip(parameter_names, variantrows[0].rstrip().split('\t')[:len(parameter_names)]))
    e = 'Could not find FORMAT column in this VCF file. Probably no genotype data.'
    assert 'FORMAT' in cols, e
    e = 'Could not find GT under FORMAT column as per http://samtools.github.io/hts-specs/VCFv4.2.pdf. Probably no genotype data.'
    assert 'GT' in cols['FORMAT'], e
    sample_variant_info = cols['FORMAT'].split(':')
    # by sample chromosome position ref,query | filterlist
    # three level automatic dicts
    # sample chromosome position = [[ref,query], [filterlist]]
    variants = _defaultdict(lambda: _defaultdict(lambda: _defaultdict(dict)))
    for row in variantrows:
        # variant info
        cols = dict(zip(parameter_names, row.rstrip().split('\t')[:len(parameter_names)]))
        # collect per sample filters for this row: each INFO filter entry is a
        # comma-separated list of 0-based sample-column indexes that failed
        INFO = dict([i.split('=') for i in cols['INFO'].split(';') if '=' in i])
        samples_filtered = {}
        for f in INFOfilters:
            if f in INFO:
                indexes = map(int, INFO[f].split(','))
                for i in indexes:
                    try:
                        samples_filtered[sample_names[i]].add(f)
                    except KeyError:
                        samples_filtered[sample_names[i]] = set([f])
        # collect sample wide filters (FILTER column applies to every sample)
        thesefilterflags = set(cols['FILTER'].split(',')) - set(['PASS'])
        for sample_name in sample_names:
            try:
                samples_filtered[sample_name].update(thesefilterflags)
            except KeyError:
                samples_filtered[sample_name] = thesefilterflags
        # sample info: map FORMAT keys onto each sample's colon-separated data
        sample_data = dict(zip(sample_names, row.rstrip().split('\t')[len(parameter_names):]))
        sample_data = dict([(s, dict(zip(sample_variant_info, d.split(':')))) for s,d in sample_data.items()])
        for sample,data in sample_data.items():
            if data['GT'] != '.':
                # there can be more than one variant per row
                # rare for SNPs but can happen for indels
                freqs = _Counter()
                alleles = cols['ALT'].split(',')
                allele_nums = map(int,data['GT'].split('/'))
                for allele_num in allele_nums:
                    if allele_num > 0:
                        freqs[alleles[allele_num-1]] += 1
                for ALT,freq in freqs.items():
                    if len(allele_nums) == 1:
                        # store ALT as single variant character
                        variants[sample][cols['CHROM']][int(cols['POS'])] = [[cols['REF'], ALT], samples_filtered[sample]]
                    else:
                        # store ALT as tuple of variant character, frequency, population_size
                        variants[sample][cols['CHROM']][int(cols['POS'])] = [[cols['REF'], (ALT,freq,len(allele_nums))], samples_filtered[sample]]
            elif data['GT'] == '.':
                # store ALT as '.' meaning insufficient information to call variant
                variants[sample][cols['CHROM']][int(cols['POS'])] = [[cols['REF'], '.'], samples_filtered[sample]]

    # convert nested defaultdicts to dicts
    variants = dict(variants)
    for k1,v1 in variants.items():
        variants[k1] = dict(v1)
        for k2,v2 in variants[k1].items():
            variants[k1][k2] = dict(v2)
    return(variants, allfilters)
def sortAmongBetweenReference(variants, sample_size):
    '''Separate "among sample" and "to reference" variants

    Takes a dictionary produced by .sortVariantsKeepFilter() as input, returns two
    of the same shape in a single dictionary.

    A variant (chromosome, position, query) seen in fewer than sample_size
    samples varies "among" the samples; one present in every sample only
    differs from the reference ("to_reference").
    '''
    # count how many samples carry each (chromosome, position, query) variant
    variant_freqs = _Counter()
    for sample, chromosomes in variants.items():
        # print('==> checking {}'.format(sample))
        for chromosome, positions in chromosomes.items():
            # iterate through variants by position
            for position, ((reference,query),filters) in sorted(positions.items()):
                variant_freqs[chromosome,position,query] += 1

    # sort by frequency: below sample_size => varies among samples
    among = set()
    to_ref = set()
    for info,freq in variant_freqs.items():
        if freq < sample_size:
            among.add(info)
        else:
            to_ref.add(info)

    print('among: {}; to reference: {}'.format(len(among),len(to_ref)))

    # divide up into two dictionaries of the same nested shape as the input
    variants_among = {}
    variants_to_ref = {}
    for sample, chromosomes in variants.items():
        variants_among[sample] = {}
        variants_to_ref[sample] = {}
        for chromosome, positions in chromosomes.items():
            variants_among[sample][chromosome] = {}
            variants_to_ref[sample][chromosome] = {}
            # iterate through variants by position
            for position, ((reference,query),filters) in sorted(positions.items()):
                if (chromosome,position,query) in among:
                    variants_among[sample][chromosome][position] = ((reference,query),filters)
                elif (chromosome,position,query) in to_ref:
                    variants_to_ref[sample][chromosome][position] = ((reference,query),filters)
                else:
                    raise Exception('dividing variants between and among failed!')

    return({'among':variants_among, 'to_reference':variants_to_ref})
def to_by_position_filtered(variants, filters_applied, summarise = True):
    '''Sort variants by position and divide into non-filtered and filtered

    Returns (by_position, by_position_filtered): each maps chromosome to a
    Counter keyed by (position, reference, query, filter-name-or-None),
    counting how many samples carry that variant.  A variant with none of
    filters_applied flagged goes to by_position; otherwise one entry per
    matching filter goes to by_position_filtered.
    '''
    by_position = _defaultdict(_Counter)
    by_position_filtered = _defaultdict(_Counter)
    for sample, chromosomes in variants.items():
        for chromosome, positions in chromosomes.items():
            # iterate through variants by position
            for position, ((reference,query),filters) in sorted(positions.items()):
                if len(filters & set(filters_applied)) == 0:
                    # retain variants without any filters flagged (of those we are interested in)
                    by_position[chromosome][(position,reference,query,None)] += 1
                else:
                    for f in filters & set(filters_applied):
                        # also retain those with a filter flag, separately for each filter
                        by_position_filtered[chromosome][(position,reference,query,f)] += 1

    # NOTE(review): the summary below uses 'chromosome' after the loops have
    # finished, so it only reports the last chromosome iterated, and it would
    # raise NameError if 'variants' is an empty dict while summarise is True
    # - confirm whether multi-chromosome summaries are needed here.
    if summarise:
        for f1 in sorted(filters_applied):
            print('-- {} --'.format(f1))
            these = []
            if len(variants):
                for position,reference,query,f2 in sorted(by_position_filtered[chromosome]):
                    if f1 == f2:
                        these += ['{}: {} => {}, {}'.format(position,reference,query,f2)]
            print('Total: {}'.format(len(these)))
            print('\n'.join(these))

    return(by_position,by_position_filtered)
def reportCumulative(filter_order, reference_id, VCFs, VCFs_indels = False):
    '''
    Generate simple table of cumulative totals after filters applied to a VCF file

    This function parses VCF files already created by BAGA with various filters
    applied and writes the total of each class of variant (SNP, indel) removed
    by each filter to a .csv file.

    filter_order : short filter names (keys of short2fullnames); filters are
        applied cumulatively in this order.
    reference_id : chromosome ID the variants were called against.
    VCFs : dict mapping dataset name -> {variant type -> VCF path or list of
        paths}.
    VCFs_indels : unused here (NOTE(review): retained for interface
        compatibility - confirm whether callers still pass it).
    '''
    # cumulative affect of filters
    ## build table column names
    colnames = ["Cumulative filters applied"]
    # dataset == reads_names
    for dataset in sorted(VCFs):
        for v in ['SNPs', 'InDels']:
            colnames += ["{} {}".format(v, dataset)]

    variant_type_order = ['SNPs', 'InDels']
    for v in variant_type_order:
        colnames += ["{} all samples".format(v)]

    # start with no filters
    filters_applied_ordered = [()]
    # then add some
    for filtername in filter_order:
        filters_applied_ordered += [short2fullnames[filtername]]

    # baga (non-GATK) filters are encoded as 'F_<name>' in VCF file names
    collect_baga_filters = [f for f in filter_order if 'GATK' not in f]

    from glob import glob as _glob
    # find actual VCFs . . find the VCFs with suffixes that match the requested filters
    VCFs_use = {}
    for dataset,varianttypes in sorted(VCFs.items()):
        VCFs_use[dataset] = {}
        for varianttype,fname in varianttypes.items():
            VCFs_use[dataset][varianttype] = []
            if not isinstance(fname, list):
                # --calleach --calljoint
                filenames = [fname]
            else:
                # --callsingles
                filenames = fname
            for filename in filenames:
                # glob for variants of the base name carrying filter suffixes
                bits = filename.split(_os.path.extsep)
                pattern = _os.path.extsep.join(bits[:-1]) + '*' + bits[-1]
                for checkthis in _glob(pattern):
                    filter_present = []
                    for fltr in collect_baga_filters:
                        if 'F_'+fltr in checkthis:
                            filter_present += [fltr]
                    if set(filter_present) >= set(collect_baga_filters):
                        # OK if additional filters included in a VCF
                        VCFs_use[dataset][varianttype] += [checkthis]

    ### need to know (i) how many samples per dataset which may span VCF files or may not . . .
    # build table: one row per cumulative filter set, tracking three variant
    # groupings (all, among-samples-only, fixed-versus-reference)
    cumulative_filters = set()
    variant_groups = ('all', 'among', 'to_reference')
    rows = {group_name:list() for group_name in variant_groups}
    for filters in filters_applied_ordered:
        cumulative_filters.update(filters)
        this_row = {}
        for group_name in variant_groups:
            try:
                this_row[group_name] = [filter_names[filters]]
            except KeyError:
                this_row[group_name] = ['None']
        # must be saved as sets to only count variants once each
        totals_by_type = {}
        for group_name in variant_groups:
            totals_by_type[group_name] = {}
            for varianttype in variant_type_order:
                totals_by_type[group_name][varianttype] = set()
        for dataset,varianttypes in sorted(VCFs_use.items()):
            print('dataset: {}'.format(dataset))
            for varianttype in variant_type_order:
                for filename in varianttypes[varianttype]:
                    header, header_section_order, these_colnames, variantrows = parseVCF(filename)
                    variants, allfilters = sortVariantsKeepFilter(header, these_colnames, variantrows)
                    # divide variants into those among sample only, those between sample
                    # and reference
                    variants_divided = sortAmongBetweenReference(variants, sample_size = len(these_colnames[9:]))
                    variants_divided['all'] = variants
                    # cumulative filters applied here
                    for group_name in variant_groups:
                        by_position, by_position_filtered = to_by_position_filtered(
                                variants_divided[group_name], cumulative_filters)
                        # reference_id is the chromosome ID
                        print('{} {}'.format(len(by_position[reference_id]), varianttype))
                        this_row[group_name] += [len(by_position[reference_id])]
                        totals_by_type[group_name][varianttype].update([info[0] for info in by_position[reference_id]])

        # add totals for variant class in columns corresponding to variant_type_order
        for group_name in variant_groups:
            this_row[group_name] += [len(totals_by_type[group_name][varianttype]) for varianttype in variant_type_order]
            rows[group_name] += [this_row[group_name]]

    # one output CSV per variant grouping
    for group_name in variant_groups:
        # just the totals by variant class
        outfilename = 'Table_of_cumulative_variant_totals_as_filters_applied_{}.csv'.format(group_name)
        print('Printing to {}'.format(outfilename))
        with open(outfilename, 'w') as fout:
            fout.write(','.join(['"{}"'.format(c) for c in colnames])+'\n')
            for row in rows[group_name]:
                fout.write(','.join(['"{}"'.format(row[0])]+[str(c) for c in row[1:]])+'\n')
def reportLists(include_filters, reference_id, VCFs, VCFs_indels = False):
    '''
    Generate simple table of variants and the filters applied from a VCF file

    This function parses VCF files already created by BAGA with various filters
    applied and writes each variant with names of filters if masked to a .csv
    file.

    include_filters : short filter names (keys of short2fullnames) to report.
    reference_id : chromosome ID the variants were called against.
    VCFs : dict mapping dataset name -> {variant type -> VCF path}.
    VCFs_indels : unused here (NOTE(review): retained for interface
        compatibility - confirm whether callers still pass it).
    '''
    ## Frequencies with filters applied divided by group that variants were called in
    ## Single position might appear more than once if affected by a filter in only
    ## some samples.
    ## arbitrary group e.g. within versus between clades?
    ## VCFs are currently divided as called so this is not easily implemented
    ## would need to specify a second list of groups
    ## build table column names
    colnames = ["Position", "Reference", "Variant", "Frequency", "Sample Group", "Filter"]
    # dataset == reads_name == Sample Group
    # also include combined frequencies
    variant_type_order = ['SNPs', 'InDels']
    # GATK filters added upstream so are in all BAGA produced VCFs if GATK used
    collect_baga_filters = [f for f in include_filters if 'GATK' not in f]
    from glob import glob as _glob
    # find actual VCFs . . find the VCFs with suffixes that match the requested filters
    VCFs_use = {}
    filters_per_VCF = {}
    checked = []
    for dataset,varianttypes in sorted(VCFs.items()):
        VCFs_use[dataset] = {}
        for varianttype,filename in varianttypes.items():
            # glob for variants of the base name that carry filter suffixes
            bits = filename.split(_os.path.extsep)
            pattern = _os.path.extsep.join(bits[:-1]) + '*' + bits[-1]
            print(pattern)
            for checkthis in _glob(pattern):
                filter_present = []
                for fltr in include_filters:
                    if 'F_'+fltr in checkthis:
                        filter_present += [fltr]
                checked += [checkthis]
                if set(filter_present) >= set(collect_baga_filters):
                    # OK if additional filters included in a VCF
                    VCFs_use[dataset][varianttype] = checkthis
                    # retain requested filters present in this VCF
                    filters_per_VCF[checkthis] = set(filter_present) & set(collect_baga_filters)
                    print('Selecting: {}'.format(checkthis))

    checked.sort()
    assert len(filters_per_VCF) > 0, 'No suitable VCFs located for filters: {}. '\
            'These were considered based on content of the CallVariants.CallerGATK '\
            'object:\n{}'.format(', '.join(collect_baga_filters), '\n'.join(checked))

    # build table
    # expand multi-part filters (e.g. 'rearrangements' -> two INFO names)
    include_filters2 = [a for b in [short2fullnames[f] for f in include_filters] for a in b]
    variant_groups = ('all', 'among', 'to_reference')
    rows = {group_name:list() for group_name in variant_groups}
    for reads_name,varianttypes in sorted(VCFs_use.items()):
        # Bug fix: this previously printed 'dataset', a stale loop variable
        # left over from the VCF-discovery loop above, so every iteration
        # reported the same (wrong) dataset name.
        print('=> Dataset: {}'.format(reads_name))
        for varianttype in variant_type_order:
            print('==> Variant class: {}'.format(varianttype))
            filename = varianttypes[varianttype]
            header, header_section_order, these_colnames, variantrows = parseVCF(filename)
            variants, allfilters = sortVariantsKeepFilter(header, these_colnames, variantrows)
            # divide variants into those among sample only, those between sample
            # and reference
            variants_divided = sortAmongBetweenReference(variants, sample_size = len(these_colnames[9:]))
            variants_divided['all'] = variants
            # filters applied here
            for group_name in variant_groups:
                print('===> Filtered in variant group: {}'.format(group_name))
                by_position, by_position_filtered = to_by_position_filtered(
                        variants_divided[group_name], include_filters2)
                ### '"Position","Reference","Variant","Frequency","Sample Group","Filter"' <======
                # reference_id is the chromosome ID
                for info,frequency in sorted(by_position[reference_id].items()):
                    position,ref_char,query,this_filter = info
                    rows[group_name] += ['{},"{}","{}",{},"{}","retained"'.format(
                            position,ref_char,query,frequency,reads_name)]
                for filter_of_interest in include_filters2:
                    for info,frequency in sorted(by_position_filtered[reference_id].items()):
                        position,ref_char,query,this_filter = info
                        if filter_of_interest == this_filter:
                            rows[group_name] += ['{},"{}","{}",{},"{}","{}"'.format(
                                    position,ref_char,query,frequency,reads_name,this_filter)]

    # one output CSV per variant grouping
    for group_name in variant_groups:
        # just the totals by variant class
        outfilename = 'Table_of_variants_with_filters_if_masked_{}.csv'.format(group_name)
        print('Printing to {}'.format(outfilename))
        with open(outfilename, 'w') as fout:
            fout.write(','.join(['"{}"'.format(c) for c in colnames])+'\n')
            fout.write('\n'.join(rows[group_name])+'\n')
# hard coded information for known filter types
# Each entry supplies the VCF header line(s) declaring the filter plus a
# 'per_sample' flag: per-sample filters are written as INFO fields (with
# Description starting "FILTER:", see sortVariantsKeepFilter), while
# reference-wide filters use a conventional FILTER entry.
known_filters = {}
# per sample filters have custom INFO added to allow for
# sample-specific per-site filtering
known_filters['rearrangements'] = {}
# if dict of ranges e.g. for rearrangements regions extended
# by no or low read depth mapping, need a list here with each
# INFO entry
known_filters['rearrangements']['string'] = [
'##INFO=<ID=rearrangements1,Number=.,Type=Integer,Description="FILTER: Within a region affected by rearrangements between reference genome and sample. Sample-specific. INFO field is list of base-0 indexes for failed samples in column order">',
'##INFO=<ID=rearrangements2,Number=.,Type=Integer,Description="FILTER: Adjacent to region affected by rearrangements between reference genome and sample and with >50% without reads mapped, i.e., absent in query or possibly insufficient mapping quality. Sample-specific. INFO field is list of base-0 indexes for failed samples in column order">']
known_filters['rearrangements']['per_sample'] = True
# reference genome specific filters need a conventional FILTER entry so all variants at a position are excluded
known_filters['genome_repeats'] = {}
known_filters['genome_repeats']['string'] = [
'##FILTER=<ID=genome_repeats,Description="Within a long repeat unit in the reference genome">'
]
known_filters['genome_repeats']['per_sample'] = False
# non-BAGA filters (e.g., from GATK) that are worth knowing about
other_filters = {'GATK':('LowQual', 'standard_hard_filter')}
# determine order of filters to include
# GATK's LowQual and standard_hard_filter
# Maps a short filter name to the tuple of filter IDs it expands to in VCFs.
short2fullnames = dict(other_filters.items())
short2fullnames['genome_repeats'] = ('genome_repeats',)
short2fullnames['rearrangements'] = ('rearrangements1', 'rearrangements2')
# names for table
# Human-readable labels used in report rows, keyed by the expanded tuples.
filter_names = {
('LowQual', 'standard_hard_filter'): "GATK standard 'hard' filter",
('genome_repeats',): 'baga reference genome repeats',
('rearrangements1', 'rearrangements2'): 'baga genome rearrangements'
}
class CallerGATK:
    '''
    Wrapper around the Broad Institute's Genome Analysis Tool Kit (GATK) for
    variant calling.

    Requires a collection of short read datasets that have been aligned to the
    same genome sequence in Sequence Alignment/Map (SAM) format. Supports both
    single-sample calling (CallVCFsGATK) and joint genotyping (CallgVCFsGATK +
    GenotypeGVCFsGATK), plus hard filtering and base quality score
    recalibration.
    '''
    def __init__(self, alignments = False, baga = False):
        '''
        Initialise with:
        a baga.AlignReads.SAMs object
        or
        path to a saved baga.CallVariants.CallerGATK object

        Exactly one of the two arguments must be supplied.
        '''
        assert alignments or baga, 'Instantiate with alignments or the path to a '\
                'previously saved CallerGATK'
        assert not (alignments and baga), 'Instantiate with alignments OR the path '\
                'to a previously saved CallerGATK!'
        if alignments:
            try:
                # BAMs prepared by the AlignReads module (post indel realignment)
                self.ready_BAMs = alignments.ready_BAMs
            except AttributeError:
                # NOTE(review): the error is printed but not raised, so a
                # partially initialised object can result — confirm intended
                print('ERROR: baga.CallVariants.CallerGATK needs a baga.AlignReads.SAMs '\
                        'object with a "ready_BAMs" attribute. This can be obtained with '\
                        'the "IndelRealignGATK()" method of the AlignReads module.')
            try:
                self.genome_sequence = alignments.genome_sequence
                self.genome_id = alignments.genome_id
            except AttributeError:
                print('ERROR: baga.CallVariants.CallerGATK needs a baga.AlignReads.SAMs '\
                        'object with a "genome_sequence" attribute. This can be '\
                        'obtained by running all methods in the AlignReads module.')
        elif baga:
            # restore attributes from a .baga archive: a gzipped tar whose
            # members are either json-serialised values or raw array payloads
            with _tarfile.open(baga, "r:gz") as tar:
                for member in tar:
                    contents = _StringIO(tar.extractfile(member).read())
                    try:
                        # either json serialised conventional objects
                        contents = _json.loads(contents.getvalue())
                    except ValueError:
                        # or longer python array.array objects (e.g. the
                        # genome sequence stored as a char array)
                        contents = _array('c', contents.getvalue())
                    setattr(self, member.name, contents)
def saveLocal(self, name):
'''
Save processed object info to a local compressed archive of json strings.
'name' should exclude extension: .baga will be added
'''
fileout = 'baga.CallVariants.CallerGATK-%s.baga' % name
with _tarfile.open(fileout, "w:gz") as tar:
print('Writing to {} . . . '.format(fileout))
for att_name, att in self.__dict__.items():
if isinstance(att, _array):
io = _StringIO(att.tostring())
io.seek(0, _os.SEEK_END)
length = io.tell()
io.seek(0)
thisone = _tarfile.TarInfo(name = att_name)
thisone.size = length
tar.addfile(tarinfo = thisone, fileobj = io)
else:
# try saving everything else here by jsoning
try:
io = _StringIO()
_json.dump(att, io)
io.seek(0, _os.SEEK_END)
length = io.tell()
io.seek(0)
thisone = _tarfile.TarInfo(name = att_name)
thisone.size = length
tar.addfile(tarinfo = thisone, fileobj = io)
except TypeError:
# ignore non-jsonable things like functions
# include unicodes, strings, lists etc etc
#print('omitting {}'.format(att_name))
pass
    def CallVCFsGATK(self,
            jar = ['external_programs', 'GenomeAnalysisTK', 'GenomeAnalysisTK.jar'],
            local_variants_path = ['variants'],
            use_java = 'java',
            force = False,
            mem_num_gigs = 8,
            max_cpus = -1,
            arguments = False):
        '''
        Call variants per sample with GATK HaplotypeCaller (single-sample mode).

        Part of GATK "Best Practices" for DNA sequencing variant calling
        https://www.broadinstitute.org/gatk/guide/best-practices/?bpm=DNAseq
        this method follows the single sample variant calling described here:
        https://www.broadinstitute.org/gatk/guide/article?id=2803
        not the joint genotyping (see .CallgVCFsGATK() and .GenotypeGVCFsGATK())

        An output after this method is a list of str to the paths of the VCF
        for each sample. Because there is a recalibration step, the list of str
        is added to a list (of lists) here:
        self.path_to_unfiltered_VCF
        If this is a list of lists (not str), downstream steps can infer
        whether a separate genotyping (not joint) analysis is being run.

        jar : path elements to the GenomeAnalysisTK.jar file
        local_variants_path : path elements for the output folder
        use_java : java executable to invoke
        force : recreate VCFs even if the output file already exists
        mem_num_gigs : gigabytes of heap for the JVM (-Xmx)
        max_cpus : for this GATK module: "cpu threads per data thread" (-nct)
        arguments : extra/overriding GATK arguments (e.g. via -A/--arguments cli)
        '''
        print(self.genome_id)
        # ensure the reference fasta exists for GATK's -R option
        genome_fna = 'genome_sequences/%s.fna' % self.genome_id
        if not _os.path.exists(genome_fna):
            _SeqIO.write(_SeqRecord(_Seq(self.genome_sequence.tostring()), id = self.genome_id),
                    genome_fna,
                    'fasta')
        jar = _os.path.sep.join(jar)
        local_variants_path = _os.path.sep.join(local_variants_path)
        if not _os.path.exists(local_variants_path):
            _os.makedirs(local_variants_path)
        local_variants_path_genome = _os.path.sep.join([
                local_variants_path,
                self.genome_id])
        if not _os.path.exists(local_variants_path_genome):
            _os.makedirs(local_variants_path_genome)
        max_processes = _decide_max_processes( max_cpus )
        start_time = _time.time()
        paths_to_raw_VCFs = []
        exe = [use_java, '-Xmx%sg' % mem_num_gigs, '-jar', jar]
        # call the last set of ready BAMs added
        for cnum,BAM in enumerate(self.ready_BAMs[-1]):
            VCF_out = BAM[:-4] + '_unfiltered.VCF'
            VCF_out = _os.path.sep.join([local_variants_path_genome, VCF_out.split(_os.path.sep)[-1]])
            if not _os.path.exists(VCF_out) or force:
                # cmd should be built as 'option':[argument list] dictionary
                # with None as values for flag options
                cmd = {}
                cmd['-T'] = ['HaplotypeCaller']
                cmd['-R'] = [genome_fna]
                cmd['-I'] = [BAM]
                cmd['--genotyping_mode'] = ['DISCOVERY']
                # haploid calling
                cmd['--sample_ploidy'] = ['1']
                # this is less expected diversity than the default 0.001
                cmd['--heterozygosity'] = ['0.0001']
                # this is less expected diversity than the default 0.0001
                cmd['--indel_heterozygosity'] = ['0.00001']
                # '--emitRefConfidence GVCF' deliberately omitted: that is for
                # joint genotyping only (see CallgVCFsGATK)
                cmd['--variant_index_type'] = ['LINEAR']
                cmd['--variant_index_parameter'] = ['128000']
                cmd['-nct'] = [str(max_processes)]
                cmd['-stand_emit_conf'] = ['10']
                cmd['-stand_call_conf'] = ['20']
                cmd['-o'] = [VCF_out]
                if arguments:
                    # overwrite defaults with direct arguments
                    # (e.g. via -A/--arguments cli)
                    from baga import parse_new_arguments
                    cmd = parse_new_arguments(arguments, cmd)
                # make commands into a list suitable for subprocess
                cmds = []
                for opt,arg in cmd.items():
                    cmds += [opt]
                    if arg is not None:
                        cmds += arg
                print('Called: %s' % (' '.join(map(str, exe + cmds))))
                _subprocess.call(exe + cmds)
            else:
                print('Found:')
                print(VCF_out)
                print('use "force = True" to overwrite')
            paths_to_raw_VCFs += [VCF_out]
            # report durations, time left etc
            _report_time(start_time, cnum, len(self.ready_BAMs[-1]))
        # append (not assign): this method may run twice, before and after
        # base score recalibration
        if hasattr(self, 'path_to_unfiltered_VCF'):
            self.path_to_unfiltered_VCF += [paths_to_raw_VCFs]
        else:
            self.path_to_unfiltered_VCF = [paths_to_raw_VCFs]
    def CallgVCFsGATK(self,
            jar = ['external_programs', 'GenomeAnalysisTK', 'GenomeAnalysisTK.jar'],
            local_variants_path = ['variants'],
            use_java = 'java',
            force = False,
            mem_num_gigs = 8,
            max_cpus = -1,
            arguments = False):
        '''
        Call per-sample gVCFs with GATK HaplotypeCaller for joint genotyping.

        Part of GATK "Best Practices" for DNA sequencing variant calling
        https://www.broadinstitute.org/gatk/guide/best-practices/?bpm=DNAseq
        this method follows the joint genotyping calling described here:
        https://www.broadinstitute.org/gatk/guide/article?id=3893
        the method .GenotypeGVCFsGATK() should be called after this.

        An output after this method and .GenotypeGVCFsGATK() is a str to the
        path of the combined joint genotyped VCF. Because there is a
        recalibration step, the str is added to a list here:
        self.path_to_unfiltered_VCF
        If this is a list of str (not lists), downstream steps can infer
        whether a joint genotyping (not separate) analysis is being run.

        jar : path elements to the GenomeAnalysisTK.jar file
        local_variants_path : path elements for the output folder
        use_java : java executable to invoke
        force : recreate gVCFs even if the output file already exists
        mem_num_gigs : gigabytes of heap for the JVM (-Xmx)
        max_cpus : for this GATK module: "cpu threads per data thread" (-nct)
        arguments : extra/overriding GATK arguments (e.g. via -A/--arguments cli)
        '''
        print(self.genome_id)
        # ensure the reference fasta exists for GATK's -R option
        genome_fna = 'genome_sequences/%s.fna' % self.genome_id
        if not _os.path.exists(genome_fna):
            _SeqIO.write(_SeqRecord(_Seq(self.genome_sequence.tostring()), id = self.genome_id),
                    genome_fna,
                    'fasta')
        jar = _os.path.sep.join(jar)
        local_variants_path = _os.path.sep.join(local_variants_path)
        if not _os.path.exists(local_variants_path):
            _os.makedirs(local_variants_path)
        local_variants_path_genome = _os.path.sep.join([
                local_variants_path,
                self.genome_id])
        if not _os.path.exists(local_variants_path_genome):
            _os.makedirs(local_variants_path_genome)
        max_processes = _decide_max_processes( max_cpus )
        start_time = _time.time()
        paths_to_raw_gVCFs = []
        exe = [use_java, '-Xmx%sg' % mem_num_gigs, '-jar', jar]
        # call the last set of ready BAMs added
        for cnum,BAM in enumerate(self.ready_BAMs[-1]):
            VCF_out = BAM[:-4] + '_unfiltered.gVCF'
            VCF_out = _os.path.sep.join([local_variants_path_genome, VCF_out.split(_os.path.sep)[-1]])
            if not _os.path.exists(VCF_out) or force:
                # cmd should be built as 'option':[argument list] dictionary
                # with None as values for flag options
                cmd = {}
                cmd['-T'] = ['HaplotypeCaller']
                cmd['-R'] = [genome_fna]
                cmd['-I'] = [BAM]
                cmd['--genotyping_mode'] = ['DISCOVERY']
                # haploid calling
                cmd['--sample_ploidy'] = ['1']
                # this is less expected diversity than the default 0.001
                cmd['--heterozygosity'] = ['0.0001']
                # this is less expected diversity than the default 0.0001
                cmd['--indel_heterozygosity'] = ['0.00001']
                cmd['--emitRefConfidence'] = ['GVCF'] # make vcfs appropriate for doing GenotypeGVCFs after
                cmd['--variant_index_type'] = ['LINEAR']
                cmd['--variant_index_parameter'] = ['128000']
                cmd['-nct'] = [str(max_processes)]
                cmd['-stand_emit_conf'] = ['10']
                cmd['-stand_call_conf'] = ['20']
                cmd['-o'] = [VCF_out]
                if arguments:
                    # overwrite defaults with direct arguments
                    # (e.g. via -A/--arguments cli)
                    from baga import parse_new_arguments
                    cmd = parse_new_arguments(arguments, cmd)
                # make commands into a list suitable for subprocess
                cmds = []
                for opt,arg in cmd.items():
                    cmds += [opt]
                    if arg is not None:
                        cmds += arg
                print('Called: %s' % (' '.join(map(str, exe + cmds))))
                _subprocess.call(exe + cmds)
            else:
                print('Found:')
                print(VCF_out)
                print('use "force = True" to overwrite')
            paths_to_raw_gVCFs += [VCF_out]
            # report durations, time left etc
            _report_time(start_time, cnum, len(self.ready_BAMs[-1]))
        # append (not assign): this method may run twice, before and after
        # base score recalibration
        if hasattr(self, 'paths_to_raw_gVCFs'):
            self.paths_to_raw_gVCFs += [paths_to_raw_gVCFs]
        else:
            self.paths_to_raw_gVCFs = [paths_to_raw_gVCFs]
def GenotypeGVCFsGATK(self, data_group_name,
jar = ['external_programs', 'GenomeAnalysisTK', 'GenomeAnalysisTK.jar'],
local_variants_path = ['variants'],
use_java = 'java',
force = False,
mem_num_gigs = 8,
arguments = False):
jar = _os.path.sep.join(jar)
local_variants_path = _os.path.sep.join(local_variants_path)
local_variants_path_genome = _os.path.sep.join([
local_variants_path,
self.genome_id])
genome_fna = 'genome_sequences/%s.fna' % self.genome_id
if not _os.path.exists(genome_fna):
_SeqIO.write(_SeqRecord(_Seq(self.genome_sequence.tostring()), id = self.genome_id),
genome_fna,
'fasta')
e1 = 'Could not find "paths_to_raw_gVCFs" attribute. \
Before starting performing joint GATK analysis, variants must be called. \
Please run:\n\
CallgVCFsGATK()\n\
method on this SAMs instance.'
assert hasattr(self, 'paths_to_raw_gVCFs'), 'Could not find "paths_to_raw_gVCFs" '\
'attribute. Before starting performing joint GATK analysis, variants must '\
'be called. Please run:\nCallgVCFsGATK()\nmethod on this SAMs instance.'
# use the last VCFs called
open('variants.list', 'w').write('\n'.join(self.paths_to_raw_gVCFs[-1]))
# this method can be called prior or post base score recalibration
# so give output a number corresponding to how many times variants called <===== need to never let total to go over two . . . <== fail with a warning/error?
use_name = '{}_{}_samples_unfiltered.vcf'.format(data_group_name, len(self.paths_to_raw_gVCFs))
VCF_out = _os.path.sep.join([local_variants_path_genome, use_name])
exe = [use_java, '-Xmx%sg' % mem_num_gigs, '-jar', jar]
# cmd should be built as 'option':[argument list] dictionary
# with None as values for flag options
cmd = {}
cmd['-T'] = ['GenotypeGVCFs']
cmd['-R'] = [genome_fna]
cmd['--heterozygosity'] = ['0.0001'] # 650 total indels prior in all samples i.e., population
cmd['--indel_heterozygosity'] = ['0.00001'] # 65 total indels prior in all samples i.e., population
cmd['-stand_emit_conf'] = ['10']
cmd['-stand_call_conf'] = ['20']
cmd['-V'] = ['variants.list']
cmd['-o'] = [VCF_out]
if arguments:
# overwrite defaults with direct arguments
# (e.g. via -A/--arguments cli)
from baga import parse_new_arguments
cmd = parse_new_arguments(arguments, cmd)
# make commands into a list suitable for subprocess
cmds = []
for opt,arg in cmd.items():
cmds += [opt]
if arg is not None:
cmds += arg
print('Called: %s' % (' '.join(map(str, exe + cmds))))
_subprocess.call(exe + cmds)
# add to a list because this is done twice
if hasattr(self, 'path_to_unfiltered_VCF'):
self.path_to_unfiltered_VCF += [VCF_out]
else:
self.path_to_unfiltered_VCF = [VCF_out]
def hardfilterSNPsGATK(self,
jar = ['external_programs', 'GenomeAnalysisTK', 'GenomeAnalysisTK.jar'],
use_java = 'java',
force = False):
jar = _os.path.sep.join(jar)
genome_fna = 'genome_sequences/%s.fna' % self.genome_id
if not _os.path.exists(genome_fna):
_SeqIO.write(_SeqRecord(_Seq(self.genome_sequence.tostring()), id = self.genome_id),
genome_fna,
'fasta')
e1 = 'Could not find "path_to_unfiltered_VCF" attribute. \
Before filtering, joint calling of variants is necessary. \
Please run:\n\
CallgVCFsGATK and GenotypeGVCFsGATK() or just CallVCFsGATK\n\
method on this SAMs instance.'
assert hasattr(self, 'path_to_unfiltered_VCF'), e1
## filtering must be done differently if
## a single joint called VCF is present
## or a set of VCFs are present
if isinstance(self.path_to_unfiltered_VCF[-1],str) or \
isinstance(self.path_to_unfiltered_VCF[-1],unicode):
# single item to joint called VCF
# extract the SNPs
raw_SNPs = self.path_to_unfiltered_VCF[-1][:-4] + '_SNPs.vcf'
cmd = [use_java, '-jar', jar,
'-T', 'SelectVariants',
'-R', genome_fna,
'-V', self.path_to_unfiltered_VCF[-1],
#'-L', '20',
'-selectType', 'SNP',
'-o', raw_SNPs]
print(' '.join(cmd))
_subprocess.call(cmd)
# filter the SNPs
hf_SNPs = (self.path_to_unfiltered_VCF[-1][:-4] + '_SNPs.vcf').replace('unfiltered','hardfiltered')
cmd = [use_java, '-jar', jar,
'-T', 'VariantFiltration',
'-R', genome_fna,
'-V', raw_SNPs,
'--filterExpression', 'QD < 2.0 || FS > 60.0 || MQ < 40.0 || MQRankSum < -12.5 || ReadPosRankSum < -8.0',
'--filterName', 'standard_hard_filter',
'-o', hf_SNPs]
print(' '.join(cmd))
_subprocess.call(cmd)
else:
# must be a list
hf_SNPs = []
for unfilteredVCF in self.path_to_unfiltered_VCF:
# single item to joint called VCF
# extract the SNPs
raw_SNPs = unfilteredVCF[:-4] + '_SNPs.vcf'
cmd = [use_java, '-jar', jar,
'-T', 'SelectVariants',
'-R', genome_fna,
'-V', unfilteredVCF,
#'-L', '20',
'-selectType', 'SNP',
'-o', raw_SNPs]
print(' '.join(cmd))
_subprocess.call(cmd)
# filter the SNPs
this_hf_SNPs = (unfilteredVCF[:-4] + '_SNPs.vcf').replace('unfiltered','hardfiltered')
cmd = [use_java, '-jar', jar,
'-T', 'VariantFiltration',
'-R', genome_fna,
'-V', raw_SNPs,
'--filterExpression', 'QD < 2.0 || FS > 60.0 || MQ < 40.0 || MQRankSum < -12.5 || ReadPosRankSum < -8.0',
'--filterName', 'standard_hard_filter',
'-o', this_hf_SNPs]
hf_SNPs += [this_hf_SNPs]
print(' '.join(cmd))
_subprocess.call(cmd)
# add to a list because this is done twice
if hasattr(self, 'path_to_hardfiltered_SNPs'):
self.path_to_hardfiltered_SNPs += [hf_SNPs]
else:
self.path_to_hardfiltered_SNPs = [hf_SNPs]
def hardfilterINDELsGATK(self,
jar = ['external_programs', 'GenomeAnalysisTK', 'GenomeAnalysisTK.jar'],
use_java = 'java',
force = False):
jar = _os.path.sep.join(jar)
genome_fna = 'genome_sequences/%s.fna' % self.genome_id
if not _os.path.exists(genome_fna):
_SeqIO.write(_SeqRecord(_Seq(self.genome_sequence.tostring()), id = self.genome_id),
genome_fna,
'fasta')
e1 = 'Could not find "path_to_unfiltered_VCF" attribute. \
Before filtering, joint calling of variants is necessary. \
Please run:\n\
CallgVCFsGATK and GenotypeGVCFsGATK() or just CallVCFsGATK\n\
method on this SAMs instance.'
assert hasattr(self, 'path_to_unfiltered_VCF'), e1
if isinstance(self.path_to_unfiltered_VCF[-1],str) or \
isinstance(self.path_to_unfiltered_VCF[-1],unicode):
# single item to joint called VCF
# extract the INDELs
raw_INDELs = self.path_to_unfiltered_VCF[-1][:-4] + '_INDELs.vcf'
cmd = [use_java, '-jar', jar,
'-T', 'SelectVariants',
'-R', genome_fna,
'-V', self.path_to_unfiltered_VCF[-1],
#'-L', '20',
'-selectType', 'INDEL',
'-o', raw_INDELs]
print(' '.join(cmd))
_subprocess.call(cmd)
# filter the INDELs
hf_INDELs = (self.path_to_unfiltered_VCF[-1][:-4] + '_INDELs.vcf').replace('unfiltered','hardfiltered')
cmd = [use_java, '-jar', jar,
'-T', 'VariantFiltration',
'-R', genome_fna,
'-V', raw_INDELs,
'--filterExpression', 'QD < 2.0 || FS > 200.0 || ReadPosRankSum < -20.0',
'--filterName', 'standard_indel_hard_filter',
'-o', hf_INDELs]
print(' '.join(cmd))
_subprocess.call(cmd)
else:
# must be a list
hf_INDELs = []
for unfilteredVCF in self.path_to_unfiltered_VCF:
# single item to joint called VCF
# extract the INDELs
raw_INDELs = unfilteredVCF[:-4] + '_INDELs.vcf'
cmd = [use_java, '-jar', jar,
'-T', 'SelectVariants',
'-R', genome_fna,
'-V', unfilteredVCF,
#'-L', '20',
'-selectType', 'INDEL',
'-o', raw_INDELs]
print(' '.join(cmd))
_subprocess.call(cmd)
# filter the INDELs
this_hf_INDELs = (unfilteredVCF[:-4] + '_INDELs.vcf').replace('unfiltered','hardfiltered')
cmd = [use_java, '-jar', jar,
'-T', 'VariantFiltration',
'-R', genome_fna,
'-V', raw_INDELs,
'--filterExpression', 'QD < 2.0 || FS > 200.0 || ReadPosRankSum < -20.0',
'--filterName', 'standard_indel_hard_filter',
'-o', this_hf_INDELs]
hf_INDELs += [this_hf_INDELs]
print(' '.join(cmd))
_subprocess.call(cmd)
# add to a list because this is done twice
if hasattr(self, 'path_to_hardfiltered_INDELs'):
self.path_to_hardfiltered_INDELs += [hf_INDELs]
else:
self.path_to_hardfiltered_INDELs = [hf_INDELs]
    def recalibBaseScoresGATK(self,
            jar = ['external_programs', 'GenomeAnalysisTK', 'GenomeAnalysisTK.jar'],
            samtools_exe = ['external_programs', 'samtools', 'samtools'],
            local_variants_path = ['variants'],
            use_java = 'java',
            force = False,
            mem_num_gigs = 8,
            max_cpus = -1):
        '''
        Recalibrate base quality scores (BQSR) in the ready BAMs.

        https://www.broadinstitute.org/gatk/guide/best-practices/?bpm=DNAseq
        For each BAM in self.ready_BAMs[-1]: builds a pre- and post-
        recalibration table with BaseRecalibrator (using the latest
        hard-filtered SNPs as known sites), writes the recalibrated BAM with
        PrintReads, and indexes it with samtools. Appends the new list of
        BAM paths to self.ready_BAMs for the second round of calling.

        jar : path elements to the GenomeAnalysisTK.jar file
        samtools_exe : path elements to the samtools executable
        local_variants_path : path elements for the variants folder
        use_java : java executable to invoke
        force : recreate outputs even if the files already exist
        mem_num_gigs : gigabytes of heap for the JVM (-Xmx)
        max_cpus : for this GATK module: "cpu threads per data thread" (-nct)
        '''
        jar = _os.path.sep.join(jar)
        samtools_exe = _os.path.sep.join(samtools_exe)
        genome_fna = 'genome_sequences/%s.fna' % self.genome_id
        if not _os.path.exists(genome_fna):
            _SeqIO.write(_SeqRecord(_Seq(self.genome_sequence.tostring()), id = self.genome_id),
                    genome_fna,
                    'fasta')
        local_variants_path = _os.path.sep.join(local_variants_path)
        if not _os.path.exists(local_variants_path):
            _os.makedirs(local_variants_path)
        local_variants_path_genome = _os.path.sep.join([
                local_variants_path,
                self.genome_id])
        if not _os.path.exists(local_variants_path_genome):
            _os.makedirs(local_variants_path_genome)
        paths_to_recalibrated_BAMs = []
        max_processes = _decide_max_processes( max_cpus )
        start_time = _time.time()
        for cnum,BAM in enumerate(self.ready_BAMs[-1]):
            table_out_pre = BAM[:-4] + '_baserecal_pre.table'
            # known sites: a single joint-called VCF (str) or one VCF per
            # sample (list, indexed in the same order as ready_BAMs)
            if isinstance(self.path_to_hardfiltered_SNPs[-1],str) or \
                    isinstance(self.path_to_hardfiltered_SNPs[-1],unicode):
                # joint calling was used
                knownSitesVCF = self.path_to_hardfiltered_SNPs[-1]
            else:
                # per sample single calling was used
                knownSitesVCF = self.path_to_hardfiltered_SNPs[-1][cnum]
            if not _os.path.exists(table_out_pre) or force:
                cmd = [use_java, '-Xmx%sg' % mem_num_gigs, '-jar', jar,
                        '-T', 'BaseRecalibrator',
                        '-R', genome_fna,
                        '-I', BAM,
                        '-nct', str(max_processes),
                        '-knownSites', knownSitesVCF,
                        '-o', table_out_pre]
                print('Called: %s' % (' '.join(map(str, cmd))))
                _subprocess.call(cmd)
            else:
                print('Found:')
                print(table_out_pre)
                print('use "force = True" to overwrite')
            table_out_post = BAM[:-4] + '_baserecal_post.table'
            if not _os.path.exists(table_out_post) or force:
                # second pass applies the first-pass table via -BQSR so the
                # pre/post tables can be compared for QC
                cmd = [use_java, '-Xmx%sg' % mem_num_gigs, '-jar', jar,
                        '-T', 'BaseRecalibrator',
                        '-R', genome_fna,
                        '-I', BAM,
                        '-nct', str(max_processes),
                        '-knownSites', knownSitesVCF,
                        '-BQSR', table_out_pre,
                        '-o', table_out_post]
                print('Called: %s' % (' '.join(map(str, cmd))))
                _subprocess.call(cmd)
            else:
                print('Found:')
                print(table_out_post)
                print('use "force = True" to overwrite')
            BAM_out = BAM[:-4] + '_baserecal.bam'
            if not _os.path.exists(BAM_out) or force:
                # write recalibrated reads to a new BAM
                cmd = [use_java, '-Xmx%sg' % mem_num_gigs, '-jar', jar,
                        '-T', 'PrintReads',
                        '-R', genome_fna,
                        '-I', BAM,
                        '-nct', str(max_processes),
                        '-BQSR', table_out_post,
                        '-o', BAM_out]
                print('Called: %s' % (' '.join(map(str, cmd))))
                _subprocess.call(cmd)
            else:
                print('Found:')
                print(BAM_out)
                print('use "force = True" to overwrite')
            cmd = [samtools_exe, 'index', BAM_out]
            _subprocess.call(cmd)
            paths_to_recalibrated_BAMs += [BAM_out]
            # report durations, time left etc
            _report_time(start_time, cnum, len(self.ready_BAMs[-1]))
        # the last list of BAMs in ready_BAMs is input for CallgVCFsGATK
        # both IndelRealignGATK and recalibBaseScoresGATK put here
        self.ready_BAMs += [paths_to_recalibrated_BAMs]
class CallerDiscoSNP:
    '''
    Wrapper around DiscoSNP++ for SNP and InDel calling from short reads.

    Requires a collection of short read datasets. Alignment and
    reference-free. Can produce a VCF file if a reference genome is provided
    (variants are then mapped to it with bwa).
    '''
    def __init__(self, reads = False,
            genome = False,
            path_to_baga = False):
        '''
        Initialise with:
        a list of one or more baga.PrepareReads.Reads object and optionally,
        a baga.CollectData.Genome object.
        OR
        a path to baga.CallVariants.CallerDiscoSNP object (like this one) that
        was previously saved.
        '''
        assert reads or path_to_baga, 'Instantiate with reads or a previously saved CallerDiscoSNP'
        assert not (reads and path_to_baga), 'Instantiate with reads or a previously saved CallerDiscoSNP!'
        if reads:
            # collect only the '*read_files' path dictionaries from each
            # Reads object (e.g. read_files, adaptorcut_read_files,
            # trimmed_read_files) — one dict per read group
            reads_paths = []
            for read_group in reads:
                reads_paths += [{}]
                for attribute_name in dir(read_group):
                    if 'read_files' in attribute_name:
                        reads_paths[-1][attribute_name] = getattr(read_group, attribute_name)
            self.reads = reads_paths
            if genome:
                # optional reference for mapping DiscoSNP++ output to a VCF
                self.genome_sequence = genome.sequence
                self.genome_id = genome.id
        elif path_to_baga:
            # restore attributes from a .baga archive: a gzipped tar whose
            # members are either json-serialised values or raw array payloads
            with _tarfile.open(path_to_baga, "r:gz") as tar:
                for member in tar:
                    contents = _StringIO(tar.extractfile(member).read())
                    try:
                        # either json serialised conventional objects
                        contents = _json.loads(contents.getvalue())
                    except ValueError:
                        # or longer python array.array objects (e.g. the
                        # genome sequence stored as a char array)
                        contents = _array('c', contents.getvalue())
                    setattr(self, member.name, contents)
def saveLocal(self, name):
'''
Save processed object info to a local compressed archive of json strings.
'name' should exclude extension: .baga will be added
'''
fileout = 'baga.CallVariants.CallerDiscoSNP-%s.baga' % name
with _tarfile.open(fileout, "w:gz") as tar:
print('Writing to {} . . . '.format(fileout))
for att_name, att in self.__dict__.items():
if isinstance(att, _array):
io = _StringIO(att.tostring())
io.seek(0, _os.SEEK_END)
length = io.tell()
io.seek(0)
thisone = _tarfile.TarInfo(name = att_name)
thisone.size = length
tar.addfile(tarinfo = thisone, fileobj = io)
else:
# try saving everything else here by jsoning
try:
io = _StringIO()
_json.dump(att, io)
io.seek(0, _os.SEEK_END)
length = io.tell()
io.seek(0)
thisone = _tarfile.TarInfo(name = att_name)
thisone.size = length
tar.addfile(tarinfo = thisone, fileobj = io)
except TypeError:
# ignore non-jsonable things like functions
# include unicodes, strings, lists etc etc
#print('omitting {}'.format(att_name))
pass
    def call(self,
            local_variants_path = ['variants_DiscoSNP'],
            path_to_bwa = False,
            use_existing_graph = False,
            add_prefix = False,
            force = False,
            max_cpus = -1,
            arguments = False):
        '''
        Call SNPs and InDels using kissnp2 and kissreads2 of DiscoSNP++

        local_variants_path : path elements for the output folder
        path_to_bwa : path to bwa for mapping variants to the reference
                      (located automatically if False and a genome is present)
        use_existing_graph : reuse a previously built de Bruijn graph (-g)
        add_prefix : prefix for DiscoSNP++ output file names (-p)
        force : accepted for a consistent interface with sibling methods
        max_cpus : maximum CPUs to use (-1 = decide automatically)
        arguments : extra/overriding DiscoSNP++ arguments (via -A/--arguments cli)
        '''
        path_to_exe = _get_exe_path('discosnp')
        local_variants_path = _os.path.sep.join(local_variants_path)
        if not _os.path.exists(local_variants_path):
            _os.makedirs(local_variants_path)
        # output is grouped per reference genome, or 'no_reference' when
        # running purely reference-free
        if hasattr(self, 'genome_id'):
            use_genome_name = self.genome_id
        else:
            use_genome_name = 'no_reference'
        local_variants_path_genome = _os.path.sep.join([
                local_variants_path,
                use_genome_name])
        if not _os.path.exists(local_variants_path_genome):
            _os.makedirs(local_variants_path_genome)
        max_processes = _decide_max_processes( max_cpus )
        # select read files to use: prefer fully trimmed reads, then
        # adaptor-cut reads, then raw read files (with warnings)
        use_reads = {}
        for n,reads_group in enumerate(self.reads):
            if 'trimmed_read_files' in reads_group:
                for pairname,pair in reads_group['trimmed_read_files'].items():
                    use_reads[pairname] = pair
            elif 'adaptorcut_read_files' in reads_group:
                print('WARNING: "trimmed_read_files" attribute not found in '\
                        'baga.PreparedReads.Reads object {}'.format(n+1))
                for pairname,pair in reads_group['adaptorcut_read_files'].items():
                    use_reads[pairname] = pair
                print('Using read files that have not been trimmed by position score')
            else:
                print('WARNING: "trimmed_read_files" nor "adaptorcut_read_files" attribute '\
                        'not found in baga.PreparedReads.Reads object {}'.format(n+1))
                assert 'read_files' in reads_group, 'Try recreating '\
                        'baga.PreparedReads.Reads . . . could not find '\
                        'any read files.'
                for pairname,pair in reads_group['read_files'].items():
                    use_reads[pairname] = pair
                print('Using read files that have not been trimmed by position score nor had '\
                        'potential library preparation artifacts removed ("adaptor" sequences '\
                        'etc) {}'.format(n+1))
        # one file per sample listing the two mate files
        # (assumes pair is keyed 1 and 2 for the two mates — TODO confirm
        # against baga.PrepareReads)
        pair_file_names = []
        for sample_name, pair in use_reads.items():
            this_file_name = _os.path.sep.join([
                    local_variants_path_genome,
                    'readspair_paths_for_{}.txt'.format(sample_name)])
            pair_file_names += [this_file_name]
            with open(this_file_name, 'w') as fout:
                fout.write('{}\n{}\n'.format(
                        _os.path.abspath(pair[1]),
                        _os.path.abspath(pair[2])))
        # master file listing all per-sample pair files: DiscoSNP++ -r input
        this_file_name = _os.path.sep.join([local_variants_path_genome,
                'readpairs_for_DiscoSNP.txt'])
        with open(this_file_name, 'w') as fout:
            for pair_file_name in pair_file_names:
                fout.write('{}\n'.format(_os.path.abspath(pair_file_name)))
        try:
            # previous failed runs can leave this file which causes problems
            _os.unlink(this_file_name+'_removemeplease')
        except OSError:
            pass
        # cmd should be built as 'option':[argument list] dictionary
        # with None as values for flag options
        cmd = {}
        cmd['-r'] = [this_file_name]
        cmd['-T'] = None
        if hasattr(self, 'genome_sequence') and hasattr(self, 'genome_id'):
            print('Will map DiscoSNP++ variants to {} ({:,} bp)'.format(
                    self.genome_id, len(self.genome_sequence)))
            if not path_to_bwa:
                path_to_bwa = _get_exe_path('bwa')
            if _os.path.isfile(path_to_bwa):
                # VCF maker only wants path to bwa exe, not the exe itself
                # (strips the trailing separator + 'bwa' — 4 characters)
                path_to_bwa = path_to_bwa[:-4]
            genome_fna = 'genome_sequences/%s.fna' % self.genome_id
            if not _os.path.exists(genome_fna):
                _SeqIO.write(_SeqRecord(_Seq(self.genome_sequence.tostring()), id = self.genome_id),
                        genome_fna,
                        'fasta')
            cmd['-G'] = [genome_fna]
            cmd['-B'] = [path_to_bwa]
        if use_existing_graph:
            print('Will attempt to use existing graph: be sure your input reads match the graph!')
            # discoRes_k_31_c_auto.h5 for default settings
            cmd['-g'] = None
        if add_prefix:
            cmd['-p'] = [add_prefix+'__DiscoSNP']
        if arguments:
            # overwrite defaults with direct arguments
            # (e.g. via -A/--arguments cli)
            from baga import parse_new_arguments
            cmd = parse_new_arguments(arguments, cmd)
        # make commands into a list suitable for subprocess
        cmds = []
        for opt,arg in cmd.items():
            cmds += [opt]
            if arg is not None:
                cmds += arg
        print(' '.join([path_to_exe] + cmds))
        _subprocess.call([path_to_exe] + cmds)
class Summariser:
    '''
    Summarise variants in a VCF in various ways.

    Parse VCF files and create .csv files for viewing in spreadsheets.
    '''
def __init__(self, VCF_paths = False, path_to_caller = False, genomes = False):
'''
A CallVariants.Summariser object can be instantiated with:
- a list of paths to VCF files
or
- path to a CallVariants.CallerGATK object
or
- path to a CallVariants.CallerDiscoSNP object
'''
assert bool(VCF_paths) ^ bool(path_to_caller), 'cannot instantiate with both '\
'paths to VCFs and a Caller object'
if VCF_paths:
for VCF in VCF_paths:
assert _os.path.exists(VCF), 'Could not find {}.\nPlease ensure all '\
'files exist'.format(VCF)
self.VCF_paths = VCF_paths
elif path_to_caller:
raise NotImplementedError('only direct paths to VCFs is currently implemented')
# allow genomes to be provided as single object for 1 replicon
# or list or tuple for one or more
# or False
if isinstance(genomes, list) or isinstance(genomes, tuple):
self.genomes = {genome.id:genome for genome in genomes}
elif genomes:
self.genomes = {genomes.id:genomes}
else:
self.genomes = genomes
def collect_variants(self):
'''
Collect variants from VCF files for
'''
all_variants = {}
all_headerdicts = {}
for VCF in self.VCF_paths: #break
header, header_section_order, these_colnames, variantrows = parseVCF(VCF)
if len(variantrows) == 0:
print('WARNING: no variants found in {}'.format(VCF))
continue
headerdict = dictify_vcf_header(header)
all_headerdicts[VCF] = headerdict
#print(headerdict)
variants, allfilters = sortVariantsKeepFilter(header, these_colnames, variantrows)
#print(variants)
for sample,chromosomes in variants.items():
if sample not in all_variants:
all_variants[sample] = {}
for chromosome,positions in chromosomes.items():
if chromosome not in all_variants[sample]:
all_variants[sample][chromosome] = {}
for pos1,info in positions.items():
if pos1 in all_variants[sample][chromosome]:
if all_variants[sample][chromosome][pos1] != info:
print('WARNING: VCFs in conflict for {} in chromosome '\
'{} at position {}. You could try processing '\
'VCFs one at a time and compare tables produced. '\
'A variant at this position was omitted.'\
''.format(sample,chromosome,pos1))
else:
all_variants[sample][chromosome][pos1] = info
return(all_variants, all_headerdicts)
    def simple(self, nodata_char = '?'):
        '''
        List all variants with rows corresponding to those in the VCF file(s) in a .csv file

        One output row per (chromosome, position, reference, variant) tuple
        with a column per sample. Sample cells hold: nodata_char for a "."
        no-data call, the variant frequency for unfiltered calls, or a
        "+"-joined list of the filters that rejected the call. If
        self.genomes provides annotated chromosomes, ORF/codon/amino-acid
        annotations are added where the variant falls within an ORF.
        '''
        all_variants, all_headerdicts = self.collect_variants()
        if len(all_variants) == 0:
            print('WARNING: no variants found the supplied VCF files!')
            return()
        # collect chromosome IDs from the '##contig' entries of each VCF header
        # ['ALT', 'FILTER', 'FORMAT', 'GATKCommandLine', 'INFO', 'contig']
        genome_IDs = set()
        for VCF,headerdict in all_headerdicts.items():
            try:
                genome_IDs.update([contig['ID'] for contig in headerdict['contig']])
            except KeyError:
                print('WARNING: no contig information found in header of {}. Reference free?'.format(VCF))
        if len(genome_IDs):
            print('Chromosomes found: {}'.format(', '.join(genome_IDs)))
        else:
            print('No chromsome information found in: {}'.format(', '.join(sorted(all_headerdicts))))
        # assert len(genome_IDs) == 1, "only one reference sequence at a time is "\
                # "implemented for simple summary. {} found: {}".format(len(genome_IDs),
                # ', '.join(genome_IDs))
        # cross-check the chromosomes provided for annotation against those
        # named in the VCFs; only warnings are issued, never a hard failure
        if self.genomes:
            annotate_with_these = set(self.genomes) & genome_IDs
            requested_not_found = set(self.genomes) - genome_IDs
            found_not_requested = genome_IDs - set(self.genomes)
            if len(requested_not_found) == 1:
                print('WARNING: {} was requested for use with annotations, but not found in VCFs'\
                        ''.format(sorted(requested_not_found)[0]))
            elif len(requested_not_found) > 1:
                print('WARNING: {} were requested for use with annotations, but not found in VCFs'\
                        ''.format(', '.join(sorted(requested_not_found))))
            if len(found_not_requested) == 1:
                print('WARNING: {} was found in VCFs, but not provided for use with annotations'\
                        ''.format(sorted(found_not_requested)[0]))
            elif len(found_not_requested) > 1:
                print('WARNING: {} were found in VCFs, but not provided for use with annotations'\
                        ''.format(', '.join(sorted(found_not_requested))))
            if len(genome_IDs) == 0:
                annotate_with_these = set(self.genomes)
                print('WARNING: Assuming VCFs were called reference free because no chromosome '\
                        'information found in VCFs. Cannot check provided chromosomes match '\
                        'variants VCF: please double check annotations!')
            if len(annotate_with_these):
                print('Using {} for annotations'.format(', '.join(sorted(annotate_with_these))))
        # collect by chromsome,position
        by_position = _defaultdict(dict)
        by_position_nodata = _defaultdict(dict)
        by_position_freqs = _defaultdict(dict)
        annotations = {}
        for sample,chromosomes in all_variants.items():
            for chromosome,positions in chromosomes.items():
                for pos1,((r,q),filters) in positions.items():
                    ## this was to keep sortVariantsKeepFilter()
                    ## compatible with other uses that assume single clone, always 1/1
                    if isinstance(q, tuple):
                        ALT,freq,total = q
                        use_q = ALT
                    else:
                        freq,total = 1,1
                        use_q = q
                    if use_q == '.':
                        # . in VCF is insufficient data to call:
                        # the data is missing (unknown)
                        # use desired "no data" character
                        by_position_nodata[chromosome,pos1][sample] = nodata_char
                        # no need to attempt codon prediction or
                        # add a row as a real variant would below
                        continue
                    try:
                        by_position[chromosome,pos1][(r,use_q)][sample] = filters
                        by_position_freqs[chromosome,pos1][(r,use_q)][sample] = freq,total
                    except KeyError:
                        # first sample seen for this allele at this position
                        by_position[chromosome,pos1][(r,use_q)] = {}
                        by_position[chromosome,pos1][(r,use_q)][sample] = filters
                        by_position_freqs[chromosome,pos1][(r,use_q)] = {}
                        by_position_freqs[chromosome,pos1][(r,use_q)][sample] = freq,total
                    # attempt ORF/codon annotation; a KeyError (chromosome not
                    # in self.genomes) or TypeError silently skips annotation
                    try:
                        ORFs_info = [(ORF_id,s,e,st,gene_name) for ORF_id,(s,e,st,gene_name) in \
                                sorted(self.genomes[chromosome].ORF_ranges.items()) if s < pos1 < e]
                        if len(ORFs_info) > 1:
                            print('WARNING: variant in more than one ORF (overlapping). '\
                                    'Detailed annotations not yet implemented (marked as "multi")')
                            multi_ORF_IDs = ','.join([ORF_id for ORF_id,s,e,st,gene_name in ORFs_info])
                            multi_ORF_names = ','.join([gene_name for ORF_id,s,e,st,gene_name in ORFs_info])
                            annotations[(chromosome,pos1,r,use_q)] = ("multi", "multi",
                                    "multi", "multi", "multi", multi_ORF_IDs, multi_ORF_names, "multi")
                        elif len(ORFs_info) == 1:
                            ORF_id,s,e,strand,gene_name = ORFs_info[0]
                            ORF_seq = self.genomes[chromosome].sequence[s:e].tostring()
                            # ORF0: base-0 offset of the variant within the ORF
                            ORF0 = pos1 - 1 - s
                            ORF0_codon_start = ORF0 - ORF0 % 3
                            ref_codon = ORF_seq[ORF0_codon_start:ORF0_codon_start+3]
                            # frame1: base-1 position of the variant within its codon
                            frame1 = ORF0 - ORF0_codon_start + 1
                            if len(r) == len(use_q) == 1:
                                # substitution
                                var_codon = list(ref_codon)
                                var_codon[frame1-1] = use_q
                                var_codon = ''.join(var_codon)
                                assert self.genomes[chromosome].sequence[pos1-1] == ref_codon[frame1-1]
                                if st == 1:
                                    var_AA = str(_Seq(var_codon).translate())
                                    ref_AA = str(_Seq(ref_codon).translate())
                                else:
                                    # reverse strand: report codons and AAs in
                                    # the coding orientation
                                    var_AA = str(_Seq(var_codon).reverse_complement().translate())
                                    ref_AA = str(_Seq(ref_codon).reverse_complement().translate())
                                    var_codon = str(_Seq(var_codon).reverse_complement())
                                    ref_codon = str(_Seq(ref_codon).reverse_complement())
                                    frame1 = 3 - (frame1-1)
                            else:
                                # indel
                                var_codon = '-'
                                var_AA = '-'
                                if st == -1:
                                    frame1 = 3 - (frame1-1)
                                    ref_AA = str(_Seq(ref_codon).reverse_complement().translate())
                                    ref_codon = str(_Seq(ref_codon).reverse_complement())
                                else:
                                    ref_AA = str(_Seq(ref_codon).translate())
                            annotations[(chromosome,pos1,r,use_q)] = (ref_codon, var_codon,
                                    ref_AA, var_AA, frame1, ORF_id, gene_name, strand)
                    except (KeyError, TypeError):
                        pass
                    except _TranslationError:
                        print('WARNING: problem encountered translating a DNA sequence '\
                                'causing a variant to be omitted from the summary. '\
                                'This may be caused by unexpected characters including "*" '\
                                'sometimes introduced by variant callers.')
                        print('Problem variant was: "{}" at position {} in seq. ID "{}", '\
                                'sample "{}"'.format(q, pos1, chromosome, sample))
                        pass
        # allow KeyError that defaultdict does not raise
        by_position_nodata = dict(by_position_nodata)
        all_chromosomes = sorted(set([a for b in all_variants.values() for a in b]))
        filenameout = 'Simple_summary_for_{}_and_{}_others__{}.csv'.format(sorted(all_variants)[0],
                len(all_variants)-1, '_'.join(all_chromosomes))
        sample_order = sorted(all_variants)
        def quote(v):
            # numeric-looking values written bare, everything else quoted
            if str(v).replace('-','').replace('.','').isdigit():
                return(str(v))
            else:
                return('"'+v+'"')
        colnames = ['Chromosome','Position (bp)', 'Reference', \
                'Variant', 'Gene ID', 'Gene name', 'Strand', 'Reference codon', 'Variant codon', \
                'Reference AA', 'Variant AA', 'Codon position'] + sample_order + \
                ['Frequency', 'Frequency (filtered)']
        with open(filenameout, 'w') as fout:
            print('Writing to {}'.format(filenameout))
            fout.write(','.join(map(quote,colnames))+'\n')
            for (chromosome,pos1),variants in sorted(by_position.items()):
                for (r,q),samples in variants.items():
                    try:
                        (ref_codon, var_codon, ref_AA, var_AA, frame1, ORF_id, \
                                gene_name, strand) = annotations[(chromosome,pos1,r,q)]
                    except KeyError:
                        # intergenic or unannotated: placeholder annotation cells
                        (ref_codon, var_codon, ref_AA, var_AA, frame1, ORF_id, \
                                gene_name, strand) = '', '', '', '', -1, '', '', 0
                    this_row = [chromosome,pos1,r,q] + [ORF_id, gene_name, strand, ref_codon, \
                            var_codon, ref_AA, var_AA, frame1]
                    # NOTE(review): freq_all counts samples with any call here;
                    # freq_filtered counts only calls passing all filters
                    freq_all = 0
                    freq_filtered = 0
                    for sample in sample_order:
                        try:
                            this_row += [by_position_nodata[chromosome,pos1][sample]]
                        except KeyError:
                            # not no data ambiguity so get freq
                            try:
                                freq,total = by_position_freqs[chromosome,pos1][(r,q)][sample]
                                if freq == total == 1:
                                    val = 1
                                else:
                                    val = float(freq) / float(total)
                                filters = samples[sample]
                                # what does the {'.'} here mean for filters?
                                if filters == set(['.']) or \
                                        filters == set(['PASS']) or \
                                        len(filters) == 0:
                                    this_row += [val]
                                    freq_filtered += 1
                                else:
                                    this_row += ['+'.join(filters)]
                                freq_all += 1
                            except KeyError:
                                # sample has no call for this allele at this position
                                this_row += [0]
                    this_row += [freq_all, freq_filtered]
                    fout.write(','.join(map(quote,this_row))+'\n')
class Checker:
    '''
    Check variants in a VCF against regional de novo assemblies.
    Extract short reads from a BAM aligned around each variant, de novo
    assemble using SPAdes, then optimal global align contigs back to those
    regions and check for variants.
    '''
    def __init__(self, VCF_paths, BAM_paths, genome):
        '''
        A CallVariants.Checker object must be instantiated with:
            - a list of paths to VCF files
            - a list of paths to BAM files
            - a CollectData genome object
        '''
        e = 'Could not find %s.\nPlease ensure all files exist'
        for VCF in VCF_paths:
            assert _os.path.exists(VCF), e % VCF
        for BAM in BAM_paths:
            # bug fix: previously this tested _os.path.exists(VCF) (variable
            # leaked from the loop above), so a missing BAM went undetected
            assert _os.path.exists(BAM), e % BAM
        self.VCF_paths = VCF_paths
        self.BAM_paths = BAM_paths
        self.genome = genome
    def saveLocal(self, name):
        '''
        Save processed object info to a local compressed archive of json strings.
        'name' should exclude extension: .baga will be added
        '''
        fileout = 'baga.CallVariants.Checker-%s.baga' % name
        with _tarfile.open(fileout, "w:gz") as tar:
            print('Writing to {} . . . '.format(fileout))
            for att_name, att in self.__dict__.items():
                if isinstance(att, _array):
                    # arrays stored as raw bytes, not json
                    io = _StringIO(att.tostring())
                    io.seek(0, _os.SEEK_END)
                    length = io.tell()
                    io.seek(0)
                    thisone = _tarfile.TarInfo(name = att_name)
                    thisone.size = length
                    tar.addfile(tarinfo = thisone, fileobj = io)
                else:
                    # try saving everything else here by jsoning
                    try:
                        io = _StringIO()
                        _json.dump(att, io)
                        io.seek(0, _os.SEEK_END)
                        length = io.tell()
                        io.seek(0)
                        thisone = _tarfile.TarInfo(name = att_name)
                        thisone.size = length
                        tar.addfile(tarinfo = thisone, fileobj = io)
                    except TypeError:
                        # ignore non-jsonable things like functions
                        # include unicodes, strings, lists etc etc
                        #print('omitting {}'.format(att_name))
                        pass
    def collect_variants(self):
        '''
        Collect variants from VCF files; returns a dict keyed by sample.

        NOTE: a later VCF containing the same sample replaces that sample's
        entry entirely (shallow merge).
        '''
        all_variants = {}
        for VCF in self.VCF_paths: #break
            header, header_section_order, these_colnames, variantrows = parseVCF(VCF)
            headerdict = dictify_vcf_header(header)
            #print(headerdict)
            variants, allfilters = sortVariantsKeepFilter(header, these_colnames, variantrows)
            #print(variants)
            # Py2/Py3-portable equivalent of dict(a.items() + b.items())
            all_variants.update(variants)
        return(all_variants)
    def doCheck(self, num_padding = 2000, max_memory = False, force = False):
        '''
        De novo assemble variant regions and compare with variant calls

        For each sample common to the VCFs and BAMs: extract reads around
        variant positions (plus all poorly/unmapped reads), assemble with
        SPAdes, align contigs back to the reference regions, then write a
        .csv table marking each variant call "corroborated", "negative",
        "ambiguous" or "absent" according to the assemblies.
        '''
        # VCFs and genome provided at instantiation
        # do some checks on provided genome and VCFs
        genome_ids = {}
        genome_lengths = {}
        pattern = _re.compile(r'##contig=<ID=([A-Za-z0-9\._]+),length=([0-9]+)>')
        for VCF in self.VCF_paths: #break
            # bug fix: initialise before the loop so a VCF with no '##contig='
            # line fails the assert below instead of raising NameError
            identified = False
            for line in open(VCF):
                if line[:9] == '##contig=':
                    try:
                        genome_id, genome_length = _re.match(pattern, line).groups()
                    except AttributeError:
                        print('Failed to parse genome information from {}'.format(line))
                    genome_lengths[int(genome_length)] = VCF
                    genome_ids[genome_id] = VCF
                    #print(genome_id, genome_length)
                    identified = True
                    break
            e = "Failed to identify which chromosome the variants in {} were called on (couldn't find '##contig=')".format(VCF)
            assert identified, e
        e = 'Differing reference genome among provided VCFs? {}'.format(genome_ids.items())
        assert len(genome_ids) == 1, e
        e = 'Genome provided ({}) does not match genome ID in provided VCFs: {}'.format(self.genome.id, list(genome_ids)[0])
        assert self.genome.id == list(genome_ids)[0], e
        print('Variants were called against {:,} bp genome: {}\n'.format(int(genome_length), genome_id))
        all_variants = self.collect_variants()
        # map (read group ID, reference name) -> BAM path
        BAMs_by_ids = {}
        for BAM in self.BAM_paths:
            header = _pysam.Samfile(BAM, 'rb').header
            BAMs_by_ids[(header['RG'][0]['ID'],header['SQ'][0]['SN'])] = BAM
            assert header['SQ'][0]['SN'] == self.genome.id, 'mismatch between '\
                    'BAM genome ({}) and genome used by BAGA ({})'.format(
                    header['SQ'][0]['SN'], self.genome.id)
        use_samples = set(all_variants) & set([sample for sample,chromosome in BAMs_by_ids])
        print('Found {} samples common to supplied VCFs and BAMs:\n{}'.format(
                len(use_samples),', '.join(use_samples)))
        from baga import Structure
        try:
            _os.mkdir('variant_checks')
        except OSError:
            pass
        path_to_variant_checks = ['variant_checks', self.genome.id]
        # inconsistant requirements for path (list or str) below
        path_to_variant_checks_str = _os.path.sep.join(path_to_variant_checks)
        try:
            _os.mkdir(path_to_variant_checks_str)
        except OSError:
            pass
        import baga
        denovo_info = {}
        all_contigs = {}
        for sample in use_samples:
            denovo_info[sample] = {}
            all_contigs[sample] = {}
            chromosomes = all_variants[sample]
            for chromosome,variants in chromosomes.items():
                collector = Structure.Collector(BAMs_by_ids[(sample, chromosome)])
                single_assembly = False
                R1 = 'variant_checks/{}/{}__{}_unmapped_R1.fastq'.format(chromosome,sample,chromosome)
                R2 = 'variant_checks/{}/{}__{}_unmapped_R2.fastq'.format(chromosome,sample,chromosome)
                RS = 'variant_checks/{}/{}__{}_unmapped_S.fastq'.format(chromosome,sample,chromosome)
                if _os.path.exists(R1) and _os.path.exists(R2) and \
                        _os.path.getsize(R1) > 0 and _os.path.getsize(R2) > 0 \
                        and not force:
                    print('Found unmapped reads at {} and {}\nUse --force/-F to overwrite'.format(R1,R2))
                    r1_out_path_um, r2_out_path_um, rS_out_path_um = R1, R2, RS
                else:
                    print('Extracting poorly and unaligned reads for sample {}'.format(sample))
                    collector.getUnmapped()
                    r1_out_path_um, r2_out_path_um, rS_out_path_um = \
                            collector.writeUnmapped(path_to_variant_checks_str)
                import AssembleReads
                if max_memory:
                    use_mem_gigs = max_memory
                else:
                    # round down available GBs
                    use_mem_gigs = int(baga.get_available_memory())
                    # unless to zero!
                    if use_mem_gigs == 0:
                        use_mem_gigs = 1
                reads_path_unmapped = {}
                output_folder_um = '_'.join(
                        r1_out_path_um.split('_')[:-1]).split(_os.path.sep)[-1]
                reads_path_unmapped[output_folder_um] = (r1_out_path_um,
                        r2_out_path_um, rS_out_path_um)
                path_to_bad_unmapped_contigs = _os.path.sep.join([
                        path_to_variant_checks_str, output_folder_um,
                        'contigs.fasta'])
                if _os.path.exists(path_to_bad_unmapped_contigs) and \
                        _os.path.getsize(path_to_bad_unmapped_contigs) > 0 and \
                        not force:
                    print('Found assembly at {}\nUse --force/-F to overwrite. '\
                            'Skipping . . .'.format(path_to_bad_unmapped_contigs))
                else:
                    if not force:
                        # if args.force specified, don't need to say anything
                        # either way, do assembly
                        print('Nothing found at {}. Doing assembly.'.format(
                                path_to_bad_unmapped_contigs))
                    reads = AssembleReads.DeNovo(paths_to_reads = reads_path_unmapped)
                    reads.SPAdes(output_folder = path_to_variant_checks,
                            mem_num_gigs = use_mem_gigs, only_assembler = True,
                            careful = False)
                # assemble read from each region with poorly/unmapped
                reads_paths = {}
                # make a second dict of reads for assembly, all values for unmapped reads
                # that need to be included in each assembly
                # first, collect reads, make fastqs, recording in a dict
                reads_path_unmapped = {}
                assemblies_by_variant = {}
                # join regions with multiple variants to avoid redundacy in de novo assemblies
                join_dist = 10000
                position_regions = []
                allpositions = sorted(variants)
                this_region = [allpositions[0]]
                for pn,pos1 in enumerate(allpositions[:-1]):
                    if pos1 + join_dist > allpositions[pn+1]:
                        this_region += [allpositions[pn+1]]
                    else:
                        position_regions += [this_region]
                        this_region = [allpositions[pn+1]]
                position_regions += [this_region]
                regions_for_de_novo = []
                for these_positions in position_regions:
                    # bug fix: append a (mutable) list, not a tuple — the
                    # clamping below assigns to elements and raised TypeError
                    # for regions within 500 bp of either end of the genome
                    regions_for_de_novo += [[these_positions[0]-500,these_positions[-1]+500]]
                    if regions_for_de_novo[-1][0] < 0:
                        regions_for_de_novo[-1][0] = 0
                    if regions_for_de_novo[-1][1] > len(self.genome.sequence):
                        ## this bit not compatible with multiple chromosomes
                        regions_for_de_novo[-1][1] = len(self.genome.sequence)
                # ensure no very long contigs else PW alignment will not be possible
                # longer regions more likely for samples more divergent from reference
                # use ajoining regions with small overlaps instead
                regions_for_de_novo2 = []
                # this seems to be maximum length before seq-align seg-faults
                # about 16GB memory required for these long alignments
                max_len = 20000
                for s,e in regions_for_de_novo:
                    if e - s > max_len:
                        num_pieces = ((e-s)//float(max_len)+1)
                        newlen = int((e-s) / num_pieces)
                        for i in range(int(num_pieces)):
                            # make internal join overlap incase variant close to a join
                            pre_pad = 0
                            end_pad = 0
                            if i > 0:
                                pre_pad = 100
                            # NOTE(review): i < num_pieces is always true so the
                            # final piece also gets end padding — possibly
                            # intended to be i < num_pieces - 1; behaviour kept
                            if i < num_pieces:
                                end_pad = 100
                            new_s,new_e = s+(newlen*i)-pre_pad,s+(newlen*(i+1)+end_pad)
                            regions_for_de_novo2 += [[new_s,new_e]]
                    else:
                        regions_for_de_novo2 += [[s,e]]
                num_padding = 0
                for s,e in regions_for_de_novo2:
                    print('Extracting reads aligned over variant from {} to {} in sample {}'\
                            ''.format(s, e, sample))
                    collector.makeCollection(s, e, 0)
                    r1_out_path, r2_out_path, rS_out_path = collector.writeCollection(
                            path_to_variant_checks_str)
                    if not r1_out_path:
                        # if no reads found, False returned
                        # bug fix: message previously reported stale 'pos1' and
                        # num_padding rather than the region just queried
                        print('WARNING: No reads found in region from {} to '\
                                '{}'.format(s, e))
                        continue
                    # put assembly in folder with same name as read files
                    output_folder = '_'.join(r1_out_path.split('_')[:-1]).split(
                            _os.path.sep)[-1]
                    print(output_folder)
                    path_to_contigs = _os.path.sep.join([path_to_variant_checks_str,
                            output_folder,
                            'contigs.fasta'])
                    # collect all contigs paths to be assembled and aligning
                    # for summarising below
                    assemblies_by_variant[s, e] = path_to_contigs
                    if _os.path.exists(path_to_contigs) and \
                            _os.path.getsize(path_to_contigs) > 0 and \
                            not force:
                        print('Found assembly at {}\nUse --force/-F to overwrite. '\
                                'Skipping . . .'.format(path_to_contigs))
                    else:
                        # if omitted from this "reads_paths" dict, no assembly done
                        # but aligning still done if in "assemblies_by_variant"
                        reads_paths[output_folder] = (r1_out_path, r2_out_path,
                                rS_out_path)
                        reads_path_unmapped[output_folder] = (r1_out_path_um,
                                r2_out_path_um, rS_out_path_um)
                # second, run each assembly in single call to
                # AssembleReads.DeNovo.SPAdes
                print('Assemble reads for each region around variants')
                reads = AssembleReads.DeNovo(paths_to_reads = reads_paths,
                        paths_to_reads2 = reads_path_unmapped)
                reads.SPAdes(output_folder = path_to_variant_checks,
                        mem_num_gigs = use_mem_gigs, single_assembly = single_assembly,
                        only_assembler = True, careful = False)
                all_contigs[sample][chromosome] = assemblies_by_variant
                # a dict of paths to contigs per region
                aligner = Structure.Aligner(self.genome)
                unmappedfasta = _os.path.sep.join([path_to_variant_checks_str,
                        output_folder_um,
                        'contigs.fasta'])
                if _os.path.exists(unmappedfasta) and _os.path.getsize(unmappedfasta) > 0:
                    # provide dict of range tuples
                    aligner.alignRegions(assemblies_by_variant, num_padding,
                            path_to_omit_sequences = unmappedfasta,
                            single_assembly = single_assembly, min_region_length = 0)
                    denovo_info[sample][chromosome] = aligner.reportAlignments()
                else:
                    print('WARNING: no assembled unmapped and poorly mapped reads found at:\n{}'.format(unmappedfasta))
                    try:
                        r1_size = _os.path.getsize(r1_out_path_um)
                        r2_size = _os.path.getsize(r2_out_path_um)
                        print('but reads, {} ({:,} bytes) and {} ({:,} bytes), exist . . check SPAdes assembly log in {}'.format(
                                r1_out_path_um,
                                r1_size,
                                r2_out_path_um,
                                r2_size,
                                unmappedfasta.replace('contigs.fasta','')))
                    except IOError:
                        print('WARNING: could not find unmapped and poorly '\
                                'aligned reads at:\n{}\n{}\nthis is unexpected but '\
                                'conceivable (if ALL reads really did map to reference!).'.format(
                                r1_out_path_um,r2_out_path_um))
                    print('proceeding with alignment of assembled putatively '\
                            'rearranged regions to reference nonetheless')
                    aligner.alignRegions(assemblies_by_variant, num_padding,
                            single_assembly = single_assembly, min_region_length = 0,
                            force = False)
                    denovo_info[sample][chromosome] = aligner.reportAlignments()
        num_padding = 0
        # collect all the de novo assembly called variants and compare
        # NOTE(review): 'chromosome' leaks from the loops above, so the table
        # name uses the last chromosome processed — single-chromosome only
        table_outname = 'variant_checks/{}/Table_of_variants_with_de_novo_comparison__{}_and_{}_others.csv'\
                ''.format(chromosome,sorted(use_samples)[0],len(use_samples)-1)
        colnames = ['Sample','Chromosome','Position (bp base-1)','Reference','Variant','Status','Filters','Alignment']
        with open(table_outname,'w') as fout:
            fout.write(','.join(['"{}"'.format(a) for a in colnames])+'\n')
            zeropadding = len(str(len(self.genome.sequence)))
            agree = {}
            missing = {}
            disagree = {}
            for sample in use_samples:
                chromosomes = all_variants[sample]
                agree[sample] = {}
                missing[sample] = {}
                disagree[sample] = {}
                for chromosome,variants in chromosomes.items():
                    these_agree = {}
                    these_missing = {}
                    these_disagree = {}
                    # first collect all variants among de novo alignments
                    all_de_novo_vars = {}
                    vars2alignment = {}
                    for ref_region_id,alnd_contigs in denovo_info[sample][chromosome]['variants_by_contig'].items():
                        alignment_name = 'variant_checks/{1}/{0}__{1}_{2}_multi_alnd.fna'.format(
                                sample,
                                chromosome,
                                ref_region_id[4:])
                        for contig_id,pos0s in alnd_contigs.items():
                            for pos0,(ref_char,alnd_char) in pos0s.items():
                                vars2alignment[pos0+1] = alignment_name
                                try:
                                    all_de_novo_vars[pos0+1].add((ref_char,alnd_char))
                                except KeyError:
                                    all_de_novo_vars[pos0+1] = set([(ref_char,alnd_char)])
                    # then compare to variants and write result to table
                    for pos1,((ref,query),filters) in sorted(variants.items()):
                        if pos1 in all_de_novo_vars:
                            if len(all_de_novo_vars[pos1]) == 1:
                                ref_char,alnd_char = all_de_novo_vars[pos1].pop()
                                #assert ref_char == ref, 'reference character mismatch: {},{} {},{}'.format(ref,query,ref_char,alnd_char)
                                if ref_char != ref:
                                    print('WARNING: reference character mismatch: {},{} {},{}'.format(ref,query,ref_char,alnd_char))
                                    print(pos1,vars2alignment[pos1])
                                if alnd_char == query:
                                    status = "corroborated"
                                    these_agree[pos1] = (ref,query),filters
                                else:
                                    status = "negative"
                                    these_disagree[pos1] = (ref,query),filters
                            else:
                                # contigs disagree among themselves at this position
                                status = "ambiguous"
                                these_disagree[pos1] = (ref,query),filters
                        else:
                            status = "absent"
                            these_missing[pos1] = (ref,query),filters
                        try:
                            alignment = vars2alignment[pos1]
                        except KeyError:
                            alignment = 'none'
                        fout.write('"{}","{}",{},"{}","{}","{}","{}","{}"\n'.format(
                                sample,
                                chromosome,
                                pos1,
                                ref,
                                query,
                                status,
                                '+'.join(filters),
                                alignment
                                ))
                    agree[sample][chromosome] = these_agree
                    disagree[sample][chromosome] = these_disagree
                    missing[sample][chromosome] = these_missing
        print('Wrote variants and whether thay were found in their respective de '\
                'novo assembled contigs in:\n{}'.format(table_outname))
        # #print('summary')
        for sample,chromosomes in agree.items():
            for chromosome,these_agree in chromosomes.items():
                print('{} aligned against {}: {} SNPs called also in de novo '\
                        'assemblies; {} not.'.format(sample, chromosome,
                        len(these_agree), len(missing[sample][chromosome])))
        self.agree = agree
        self.disagree = disagree
        self.missing = missing
        # generate name from genome + used samples in sorted order
        # should be unique to this combination without needing a specific --read_group X --genome Y combination
        # currently arbitrary VCFs and BAMs can be provided
        # does not allow use of combined chromosomes
        # could be standardised baga-wide?
        hasher = _md5()
        # encode for Python 3 compatibility (no-op for ASCII on Python 2)
        hasher.update(str([self.genome.id]+sorted(use_samples)).encode('utf-8'))
        unique_name = hasher.hexdigest()
        self.saveLocal(unique_name)
class Filter:
    '''
    Methods to remove variant calls from VCFs according to position specific
    filters inferred using the Structure and Repeats modules of the Bacterial
    and Archaeal Genome Analyser.
    '''
    def __init__(self, VCF_paths, genome): #, reads):
        '''
        A CallVariants.Filter object must be instantiated with:
            - a list of paths to VCF files
            - a CollectData genome object
        '''
        e = 'Could not find %s.\nPlease ensure all files exist'
        for VCF in VCF_paths:
            assert _os.path.exists(VCF), e % VCF
        self.VCF_paths = VCF_paths
        self.ORF_ranges = genome.ORF_ranges
    # http://www.1000genomes.org/node/101
    # FILTER filter: PASS if this position has passed all filters, i.e. a call is made at this position.
    # Otherwise, if the site has not passed all filters, a semicolon-separated list of codes for filters
    # that fail. e.g. "q10;s50" might indicate that at this site the quality is below 10 and the number
    # of samples with data is below 50% of the total number of samples. "0" is reserved and should not
    # be used as a filter String.
    def apply_filter_by_ranges(self, variant_rows, ranges, filter_id, sample_index = False):
        '''
        return variant VCF rows changing:
            FILTER cell if sample_index = False (applies to all samples)
            adding an entry to INFO describing which sample filtered if sample_index provided
        sample_index is base-0 index of sample column order

        Also returns a dict of {position: [REF, ALT]} for the rows that fell
        within a range. Ranges are tested as s < POS <= e.
        '''
        new_rows = []
        filtered = {}
        for row in variant_rows:
            cells = row.split('\t')
            pos1 = int(cells[1])
            filter_status = cells[6]
            info_cell = cells[7]
            fail = False
            for s, e in ranges:
                if s < pos1 <= e:
                    # this variant is within region to reject
                    fail = True
                    break
            if fail:
                filtered[pos1] = cells[3:5]
                if sample_index is not False:
                    # applies just to this sample: record in INFO
                    infos = info_cell.split(';')
                    # ignore entries without an "="
                    infos = dict([i.split('=') for i in infos if '=' in i])
                    if filter_id in infos:
                        # add this sample index to existing of previously filtered samples at this position
                        new_filtered_index_list = list(map(int,infos[filter_id].split(','))) + [sample_index]
                        # could assert this sample not already filtered at this position . . .
                        # for now silently overwrite (!)
                        infos[filter_id] = ','.join(map(str,sorted(set(new_filtered_index_list))))
                    else:
                        # make new entry for this index
                        infos[filter_id] = str(sample_index)
                    info_cell = ';'.join(['='.join([k,v]) for k,v in sorted(infos.items())])
                    cells[7] = info_cell
                else:
                    # applies to all: change FILTER
                    if filter_id in filter_status:
                        print('WARNING: {} already has {}'.format(cells[0], filter_id))
                    elif filter_status == 'PASS':
                        filter_status = filter_id
                    else:
                        filter_status = ';'.join(filter_status.split(';') + [filter_id])
                    cells[6] = filter_status
                new_rows += ['\t'.join(cells)]
            else:
                new_rows += [row]
        return(new_rows, filtered)
    def markVariants(self, filters_to_apply):
        '''
        Given details of one or more filter to apply, write new VCFs with variants marked

        Each applied filter writes a new VCF with a '__F_<filter_id>' suffix;
        subsequent filters are applied to the previous filter's output so the
        final VCF carries all marks. Records what was filtered per VCF, per
        filter, per sample in self.all_filtered.
        '''
        all_filtered = {}
        for VCF_path in self.VCF_paths:
            all_filtered[VCF_path] = {}
            use_VCF_path = VCF_path
            for filter_id, filter_info in filters_to_apply.items():
                # new VCF with suffix saved for each filter applied
                print('Applying {} filter to variants in:\n{}'.format(filter_id, use_VCF_path))
                header, header_section_order, colnames, variants = parseVCF(use_VCF_path)
                print('{} variant positions found'.format(len(variants)))
                # VCF sample columns start after the 9 fixed columns
                sample_order = colnames[9:]
                these_filtered = {}
                if filter_info['per_sample']:
                    for sample,ranges in sorted(filter_info['ranges'].items()):
                        # either vcf per sample or multi-sample vcfs
                        # try all samples in all vcfs to handle either scenario
                        if sample in sample_order:
                            these_filtered[sample] = {}
                            infos_to_add = set()
                            if isinstance(ranges, dict):
                                # sometimes 'extended' versions of filters e.g., no or few reads adjacent to disrupted regions
                                # add as filtername1, filtername2 etc
                                for n,(filter_variant,these_ranges) in enumerate(sorted(ranges.items())):
                                    # variants list of rows gets iteratively added to per sample
                                    variants, filtered = self.apply_filter_by_ranges(variants,
                                            these_ranges,
                                            filter_id+str(n+1),
                                            sample_index = sample_order.index(sample))
                                    infos_to_add.add(filter_info['string'][n])
                                    these_filtered[sample][filter_id+str(n+1)] = filtered
                            else:
                                # one set of filter ranges, per sample
                                variants, filtered = self.apply_filter_by_ranges(variants,
                                        ranges,
                                        filter_id,
                                        sample_index = sample_order.index(sample))
                                infos_to_add.add(filter_info['string'])
                                these_filtered[sample] = filtered
                            # add filter info as INFO
                            header['INFO'] += list(infos_to_add)
                else:
                    # just a single reference-genome specific filter to be applied to all samples via the FILTER property
                    variants, filtered = self.apply_filter_by_ranges(variants, filter_info['ranges'], filter_id)
                    # record filtered positions per sample even though determined by reference genome
                    these_filtered = dict([(sample,filtered) for sample in sample_order])
                    header['FILTER'] += filter_info['string']
                all_filtered[VCF_path][filter_id] = these_filtered
                newname = _os.path.extsep.join((use_VCF_path.split(_os.path.extsep)[:-1]))
                newname = _os.path.extsep.join([newname + '__F_' + filter_id, use_VCF_path.split(_os.path.extsep)[-1]])
                print('Writing all variants with filter information to:\n{}'.format(newname))
                new_VCF_content = []
                for header_section in header_section_order:
                    new_VCF_content += ['\n'.join(header[header_section])]
                new_VCF_content += ['\t'.join(colnames)]
                new_VCF_content += ['\n'.join(variants)]
                new_VCF_content = '\n'.join(new_VCF_content)
                open(newname, 'w').write(new_VCF_content)
                # chain: next filter is applied to this filter's output
                use_VCF_path = newname
        self.all_filtered = all_filtered
    def reportFiltered(self, to_csv = True):
        '''
        Generate a "comma separated values" (.csv) text file for loading into a
        spreadsheet or importing into a document that summarises variants in one or
        more VCF files. If to_csv = False, parsed variants are only stored as an
        attribute for further analysis.
        '''
        for VCF, filters in sorted(self.all_filtered.items()):
            print('Organising filtered variants from:\n{}'.format(VCF))
            per_sample_per_position_info = {}
            for this_filter, info in filters.items():
                for sample, info2 in info.items():
                    if sample not in per_sample_per_position_info:
                        per_sample_per_position_info[sample] = {}
                    # if info2 dict contains dicts, a subfilter was applied
                    # else a single main filter which needs a single iteration
                    try:
                        # list() for Python 3 compatibility; IndexError still
                        # raised for an empty dict as before
                        a_value = list(info2.values())[0]
                    except IndexError:
                        # could be empty though: nothing to report
                        continue
                    if isinstance(a_value, dict):
                        for sub_filter, positions in info2.items():
                            for position, (ref_char_state, sample_char_state) in positions.items():
                                if position in per_sample_per_position_info[sample]:
                                    per_sample_per_position_info[sample][position] += [(ref_char_state, sample_char_state, sub_filter)]
                                else:
                                    per_sample_per_position_info[sample][position] = [(ref_char_state, sample_char_state, sub_filter)]
                    else:
                        for position, (ref_char_state, sample_char_state) in info2.items():
                            if position in per_sample_per_position_info[sample]:
                                per_sample_per_position_info[sample][position] += [(ref_char_state, sample_char_state, this_filter)]
                            else:
                                per_sample_per_position_info[sample][position] = [(ref_char_state, sample_char_state, this_filter)]
            # save for general use
            self.per_sample_per_position_info = per_sample_per_position_info
            if to_csv:
                outfilename = VCF[:-3] + 'filtered.csv'
                print('Writing list of filtered variants to:\n{}'.format(outfilename))
                with open(outfilename, 'w') as fout:
                    colnames = ['sample', 'position', 'CDS', 'reference', 'variant', 'filter']
                    fout.write(','.join(['"'+c+'"' for c in colnames])+'\n')
                    for sample, positions in sorted(per_sample_per_position_info.items()):
                        for position, info in sorted(positions.items()):
                            # bug fix: previously 'name' leaked from a list
                            # comprehension, so every ORF was labelled with the
                            # name of the last ORF iterated; now each ORF is
                            # paired with its own gene name
                            ORFnames = []
                            for ORF, (s, e, d, name) in self.ORF_ranges.items():
                                if s < position <= e:
                                    if len(name):
                                        ORFnames += ['{} ({})'.format(ORF, name)]
                                    else:
                                        ORFnames += ['{}'.format(ORF)]
                            if len(ORFnames) > 0:
                                ORF = '"'+','.join(ORFnames)+'"'
                            else:
                                ORF = '""'
                            for (ref_char_state, sample_char_state, this_filter) in info:
                                row_cells = ['"'+sample+'"', str(position), ORF, '"'+ref_char_state+'"', '"'+sample_char_state+'"', '"'+this_filter+'"']
                                fout.write(','.join(row_cells)+'\n')
    def doFiltering(self, filters):
        '''
        filter dict keys must be in known_filters dict below
        values are a list or dict of ranges

        Validates that all provided VCFs were called against one and the same
        reference genome (via '##contig=' headers), then marks variants
        (markVariants) and writes the summary csv (reportFiltered).
        '''
        # do some checks on provided genome and VCFs
        genome_ids = {}
        genome_lengths = {}
        pattern = _re.compile(r'##contig=<ID=([A-Za-z0-9\._]+),length=([0-9]+)>')
        for VCF in self.VCF_paths:
            # bug fix: initialise before the loop so a VCF with no '##contig='
            # line fails the assert below instead of raising NameError
            identified = False
            for line in open(VCF):
                if line[:9] == '##contig=':
                    try:
                        genome_id, genome_length = _re.match(pattern, line).groups()
                    except AttributeError:
                        print('Failed to parse genome information from {}'.format(line))
                    genome_lengths[int(genome_length)] = VCF
                    genome_ids[genome_id] = VCF
                    identified = True
                    break
            e = "Failed to identify which chromosome the variants in {} were called on (couldn't find '##contig=')".format(VCF)
            assert identified, e
        e = 'Differing reference genome among provided VCFs? {}'.format(genome_ids.items())
        assert len(genome_ids) == 1, e
        ## is this only part that uses genome (i.e. not necessary)
        # e = 'Genome provided ({}) does not match genome ID in provided VCFs: {}'.format(self.genome_genbank_record.id, genome_ids.keys()[0])
        # assert self.genome_genbank_record.id == genome_ids.keys()[0], e
        # print('Variants were called against {:,} bp genome: {}\n'.format(int(genome_length), genome_id))
        filters_to_apply = {}
        for this_filter, ranges in filters.items():
            try:
                filters_to_apply[this_filter] = known_filters[this_filter]
                filters_to_apply[this_filter]['ranges'] = ranges
            except KeyError:
                print('Unknown filter type: {}. Choose from {}'.format(this_filter, ', '.join(known_filters) ))
        self.markVariants(filters_to_apply)
        self.reportFiltered()
class Linkage:
'''Methods to measure co-incidence of alleles on the same reads or fragments.
Currently baga.CollectData.Genome only supports single chromosome genomes
This test should be run separately for each reference genome (chromosome) against which
reads are mapped against.
Alleles on the same reads (and therefore same chromosomes) called at
polymorphisms in a sample of pooled genomic DNA. Infrequent co-incidence of
variants on the same read in nearby polymorphisms implies variants occuring
in different genomes in the sample (separate lineages) and has been described
as a "multidiverse" signature in:
Lieberman, T. D., Flett, K. B., Yelin, I., Martin, T. R., McAdam, A. J., Priebe,
G. P. & Kishony, R.
Genetic variation of a bacterial pathogen within individuals with cystic
fibrosis provides a record of selective pressures.
Nature Genetics, 2013, 46, 82-87
'''
def __init__(self, vcf_paths = False,
        alignment_paths = False,
        genome = False,
        baga = False):
    """
    Instantiate a baga.CallVariants.Linkage object.

    Requires either:
        - a baga genome (genome = ; current implementation means a single
          chromosome)
        - a list of BAM file paths (alignment_paths = )
        - a list of VCF file paths (vcf_paths = )
    or
        - a path to a saved baga.CallVariants.Linkage object (baga = )
    """
    # bug fix: error message previously said 'CallerGATK' (copy-paste from
    # a sibling class); docstring parameter names also corrected
    e = 'Instantiate with an alignment object and paths to VCF files or ' + \
        'a previously saved Linkage'
    assert ((genome and alignment_paths and vcf_paths) and not baga) or \
        (not (genome and alignment_paths and vcf_paths) and baga), e
    if alignment_paths:
        e = 'Could not find file: "%s".\nPlease ensure all files exist'
        for VCF in vcf_paths:
            assert _os.path.exists(VCF), e % VCF
        self.VCF_paths = vcf_paths
        for BAM in alignment_paths:
            assert _os.path.exists(BAM), e % BAM
        self.alignment_paths = alignment_paths
        self.genome = genome
    elif baga:
        # restore attributes from a saved archive: each tar member is either
        # a JSON-serialised object or the raw bytes of an array.array
        with _tarfile.open(baga, "r:gz") as tar:
            for member in tar:
                contents = _StringIO(tar.extractfile(member).read())
                try:
                    # either json serialised conventional objects
                    contents = _json.loads(contents.getvalue())
                except ValueError:
                    #print('json failed: {}'.format(member.name))
                    # or longer python array.array objects
                    contents = _array('c', contents.getvalue())
                setattr(self, member.name, contents)
def saveLocal(self, name):
    '''
    Save this Linkage object's attributes to a local compressed archive.

    'name' should exclude extension: .baga will be added

    Each attribute is stored as one tar member: array.array attributes as
    their raw byte string, everything else JSON-serialised; attributes that
    cannot be JSON-serialised (functions etc.) are silently omitted.
    '''
    fileout = 'baga.CallVariants.Linkage-%s.baga' % name
    with _tarfile.open(fileout, "w:gz") as tar:
        print('Writing to {} . . . '.format(fileout))
        for att_name, att in self.__dict__.items():
            if isinstance(att, _array):
                # arrays: raw bytes, no serialisation layer
                payload = att.tostring()
            else:
                try:
                    payload = _json.dumps(att)
                except TypeError:
                    # non-serialisable (functions etc.): skip this attribute
                    #print('omitting {}'.format(att_name))
                    continue
            member = _tarfile.TarInfo(name = att_name)
            member.size = len(payload)
            tar.addfile(tarinfo = member, fileobj = _StringIO(payload))
def parsePooledVCF(self, minGQ = 0):
    '''extract variant information from a VCF with ploidy > 1 e.g. pooled

    minGQ -- minimum Phred-scaled genotype quality (GQ) for a variant to be
    retained; defaults to 0 (keep every PASS variant).

    Populates self.pooled_variants as
    {VCF path: {chromosome: {position (int, base-1): info dict}}}.
    '''
    def do_type(v):
        # coerce a VCF text field to int, failing that float, else keep str
        try:
            return int(v)
        except ValueError:
            try:
                return float(v)
            except ValueError:
                return v
    pooled_variants = {}
    for VCF in self.VCF_paths:
        # parseVCF() and dictify_vcf_header() are module-level helpers
        # defined elsewhere in this file
        header, header_section_order, colnames, variants = parseVCF(VCF)
        headerdict = dictify_vcf_header(header)
        these_variants = {}
        for variantline in variants:
            # fixed VCF columns: CHROM POS ID REF ALT QUAL FILTER INFO FORMAT sample
            chrm, pos, x, ref_char, alt_chars, s, filter_status, info1, info2keys, info2values = variantline.rstrip().split('\t')
            if filter_status == 'PASS':
                # parse VCF line
                # INFO entries without '=' are flag-style and kept separately
                info1_extra = set([a for a in info1.split(';') if '=' not in a])
                info1 = dict([a.split('=') for a in info1.split(';') if '=' in a])
                # NOTE(review): assumes a 'set' INFO key is always present
                # (e.g. GATK CombineVariants output) -- KeyError otherwise;
                # confirm against the VCFs this is run on
                del info1['set']
                # per-sample FORMAT keys zipped with their values
                info2 = dict(zip(info2keys.split(':'),info2values.split(':')))
                allinfo = {}
                for k,v in info1.items()+info2.items():  # Python 2 list concat of items
                    allinfo[k] = do_type(v)
                # first in list is ref, others are alts
                # NOTE(review): 'variants' is filled from the ALT column only,
                # yet downstream code treats index 0 as the reference allele --
                # confirm which is intended
                allinfo['reference'] = ref_char
                allinfo['variants'] = alt_chars.split(',')
                allinfo['extra'] = set(info1_extra)
                # genotype as a tuple of ints, one per (pooled) haplotype
                allinfo['GT'] = tuple(map(int,allinfo['GT'].split('/')))
                ### set stringency here: Phred-scaled confidence for GT
                ### <10 is <90%, <20 is <99%, >20 is generally desirable
                ### however GT isn't well suited to the artificially high
                ### ploidy samples containing e.g. >10 samples because
                ### of the greater demand on precision versus presence/absence
                ### i.e. a low GQ for a 20/40 SNP doesn't imply the SNP call
                ### itself is a false positive
                if allinfo['GQ'] >= minGQ:
                    try:
                        these_variants[chrm][int(pos)] = allinfo
                    except KeyError:
                        # first variant seen on this chromosome
                        these_variants[chrm] = {}
                        these_variants[chrm][int(pos)] = allinfo
        pooled_variants[VCF] = these_variants
    self.pooled_variants = pooled_variants
def collectAdjacentPolymorphisms(self, dist = 1000):
    '''collect polymorphisms within a specific distance on chromosome

    dist -- maximum separation in bp for two variants to be clustered.

    Populates self.clusters[VCF][chromosome] with lists of nearby variant
    positions (base-1) drawn from self.pooled_variants.
    '''
    clusters = {}
    for VCF,chromosomes in sorted(self.pooled_variants.items()):
        clusters[VCF] = {}
        for chromosome,variants in chromosomes.items():
            positions = sorted(variants)
            # positions already assigned to an earlier cluster
            done = set()
            these_clusters = []
            for n,p1 in enumerate(positions):
                this_cluster = []
                for p2 in positions[(n+1):]:
                    if p2 - p1 < dist:
                        # p2 close enough to seed position p1: extend cluster
                        this_cluster += [p2]
                    else:
                        # first distant p2 (positions are sorted, so no later
                        # p2 can qualify): close off the cluster, if any
                        if len(this_cluster) > 0:
                            this_cluster += [p1]
                            # drop positions already clustered earlier
                            this_cluster = sorted(set(this_cluster) - done)
                            if len(this_cluster) > 0:
                                if len(this_cluster) == 1:
                                    # NOTE(review): a single leftover position is
                                    # joined to the last recorded cluster's final
                                    # member; raises IndexError if no cluster has
                                    # been recorded yet -- confirm intended
                                    this_cluster += [these_clusters[-1][-1]]
                                these_clusters += [sorted(this_cluster)]
                                done.update(this_cluster)
                            break
            # NOTE(review): a cluster is only recorded when a distant p2 is
            # encountered, so a cluster extending to the very end of
            # `positions` is never recorded -- confirm intended
            clusters[VCF][chromosome] = these_clusters
    self.clusters = clusters
def check_loci_in_read(self, check_loci_pos1, refseq, refread0_to_varread0, chrm1_to_refread0, r):
    '''Check coincidence of alleles on single reads from pooled gDNA samples

    check_loci_pos1 -- sorted (base-1 position, allele strings) pairs; the
        first allele string is treated as the reference state, the rest as
        alternatives
    refseq -- reference chromosome sequence over the read's aligned region
        (a "reference read" containing no variants)
    refread0_to_varread0 -- maps base-0 positions in that reference read to
        base-0 positions in the actual read sequence
    chrm1_to_refread0 -- maps base-1 chromosome positions to base-0
        positions in the reference read
    r -- the aligned read; assumed to expose query_sequence and
        query_qualities (pysam AlignedSegment) -- TODO confirm

    Returns a dict mapping each locus position to either the matching
    variant allele string, 'no mutation', or a 'noisy segment: ...' status.
    For indels the recorded key is locus_pos1 - 1 (the VCF-style position).
    '''
    alleles_per_loci = {}
    for n,(locus_pos1,alleles) in enumerate(check_loci_pos1):
        # first allele string is the reference state, rest are alternatives
        ref_char = alleles[0]
        var_chars = alleles[1:]
        ## get this aligned region of ref chromosome
        # (equivalent to read without any variant positions)
        # start position first
        refread_start0 = chrm1_to_refread0[locus_pos1]
        # end of each piece is either next variant or the end of the sequence
        if n == len(check_loci_pos1) - 1:
            # this is last variant: select to end of alignment (of a read with no variants)
            # NOTE(review): len(refseq) - 1 excludes the final base from the
            # segment -- confirm off-by-one is intended
            refread_end0 = len(refseq) - 1
        else:
            # next variant does not align in a read if it is spanned by a deletion
            # at this or another variant <== not tested yet
            # ==> if long del absent: no problem; if del present, r.reference_end
            # will be extended because it is defined by the length of alignment, not
            # read length
            refread_end0 = chrm1_to_refread0[check_loci_pos1[n+1][0]]
        # NOTE(review): the next statement has no effect -- leftover debug?
        len(chrm1_to_refread0)
        this_piece_ref = str(refseq[refread_start0:refread_end0])
        ## get this aligned region of read using provided alignment
        # (containing none, some or all variants in an unknown combination of alleles
        # for comparison with refread)
        # start of a slice is at pos0
        # If var read has a deletion at start of this segment, those positions will
        # be without key (refread0 index) in refread0_to_varread0 because no
        # homologous sequence in refread0.
        # To collect appropriate, variant-containing, segment from r.query_sequence
        # as a slice: refread0-1 => varread0; then varread0+1 is correct slice start.
        # If var read has an insertion at start of this segment, those positions
        # will be without value (varread0 index) in refread0_to_varread0 because no
        # homologous sequence in varread0.
        # to collect appropriate, variant-containing, segment from r.query_sequence
        # as a slice: refread0-1 => varread0; then varread0+1 is correct slice start.
        if refread_start0 == 0:
            # for SNPs at first position
            varread_start0 = refread0_to_varread0[refread_start0]
        else:
            try:
                # this should fail on indels without: -1 in ref index, +1 in read index
                varread_start0 = refread0_to_varread0[refread_start0-1]+1
            except KeyError:
                # alignment does not behave as expected here: flag and move on
                alleles_per_loci[locus_pos1] = 'noisy segment: unexpected read alignment'
                continue
        try:
            # Fail e.g. 1) deletion up to end -1 of read causes
            # refread0_to_varread0[refread_end0-1]+1 to fail, just omitting -1 +1
            # fixes this.
            # Not encountered: if next variant is a deletion, the pos0 ending slice
            # will be at a position not aligned between ref and read, so -1 +1 also
            # needed (did offsetting indels cause this?).
            varread_end0 = refread0_to_varread0[refread_end0-1]+1
        except KeyError:
            varread_end0 = refread0_to_varread0[refread_end0]
        # Other failures outside of this should be due to noisy reads and can be
        # ignored (i.e., store read as noisy).
        this_piece_read = r.query_sequence[varread_start0:varread_end0]
        this_piece_read_qualities = r.query_qualities[varread_start0:varread_end0]
        # A) does ref segment == (naive) read segment?
        # NOTE(review): variant_found is assigned but never read below
        variant_found = False
        if this_piece_ref == this_piece_read:
            # no variants here, record as so, then continue to next segement
            if all([len(char) == 1 for char in var_chars]):
                alleles_per_loci[locus_pos1] = 'no mutation'
            else:
                # revert position to pre-indel as in VCFs for recording
                alleles_per_loci[locus_pos1-1] = 'no mutation'
            continue
        else:
            # This bit applies the actual changes: which variant (allele) makes ref
            # segment == var segment?
            # For reads of a population, need to try each allele to see if any match
            allele_found = False
            for var_char in var_chars:
                # Test the region affected by latest varinat only (independent from
                # prior variants).
                # This work for insertions i.e. replace '' at start of ref segment
                # does the insertion at the beginning.
                this_piece_ref_with_var = this_piece_ref.replace(ref_char, var_char, 1)
                # Is mutated refseq like the read? (aligned bit of chromosome plus
                # leading unaligned bit after mutation i.e., variant present)
                # initial compare lengths.
                if len(this_piece_ref_with_var) != len(this_piece_read):
                    # This variant not here, check next variant (allele)
                    # Assume because this allele is absent but could also be because
                    # of apparent mutation elsewhere in segment (noise because wasn't
                    # called across all the reads in the 'pileup')
                    continue
                else:
                    # given equal length after 'mutating' ref, after omitting low
                    # confidence positions, does ref segment with variant applied
                    # equal read segment?
                    this_piece_ref_with_var_qualpass = []
                    this_piece_read_qual_pass = []
                    for i,(char,qual) in enumerate(zip(this_piece_read, this_piece_read_qualities)):
                        if qual >= 20:
                            # confident base: compare for real
                            this_piece_read_qual_pass += [char]
                            this_piece_ref_with_var_qualpass += [this_piece_ref_with_var[i]]
                        else:
                            # these will be invisible in comparison
                            this_piece_read_qual_pass += ['-']
                            this_piece_ref_with_var_qualpass += ['-']
                    this_piece_read_qual_pass = ''.join(this_piece_read_qual_pass)
                    this_piece_ref_with_var_qualpass = ''.join(this_piece_ref_with_var_qualpass)
                    if this_piece_read_qual_pass == this_piece_ref_with_var_qualpass:
                        # this is the variant here, record locus and allele of this variant
                        allele_found = True
                        if all([len(char) == 1 for char in var_chars]):
                            alleles_per_loci[locus_pos1] = var_char
                        else:
                            # revert position to pre-indel as in VCFs for recording
                            alleles_per_loci[locus_pos1-1] = var_char
                        # pieces_this_read += [this_piece_read]
                        # on to next segment
                        break
                    else:
                        # this variant not here, check next variant (allele)
                        # assume because this allele is absent but could also be
                        # because of apparent mutation elsewhere in segment (noise
                        # because wasn't called across all the reads in the 'pileup')
                        continue
            # This bit is currently assuming noise in an earlier segment is only a
            # problem for that segment: not later segements.
            # Previously, whole read abandoned.
            if not allele_found:
                if all([len(char) == 1 for char in var_chars]):
                    alleles_per_loci[locus_pos1] = 'noisy segment: undetermined'
                else:
                    # revert position to pre-indel as in VCFs for recording
                    alleles_per_loci[locus_pos1-1] = 'noisy segment: undetermined'
    return(alleles_per_loci)
def check_within_frags(self, spanning_frags, corrected_indel_alleles, these_reads, these_reads_bypos0_indel_offsets, minMappingQuality = 60):
    '''Check coincidence of alleles on paired reads (fragments) from pooled gDNA samples

    spanning_frags -- fragment IDs whose two reads together span >= 2 loci
    corrected_indel_alleles -- {pos1: allele strings} with indel alleles
        already stripped of their shared leading character
    these_reads -- {(fragID, is_read1): aligned read} (pysam AlignedSegment
        assumed -- TODO confirm)
    these_reads_bypos0_indel_offsets -- {pos0: {(fragID, is_read1): read}}
    minMappingQuality -- both reads of a pair must meet this mapq

    Returns {fragID: merged per-locus result dict, or a status string for
    low quality alignments}.
    '''
    def _position_maps(r):
        # Map base-1 chromosome positions to base-0 positions in a
        # hypothetical variant-free ('reference') read over r's aligned
        # region, and map those reference-read positions to base-0 positions
        # in the actual read via pysam's aligned_pairs (pairs containing
        # None, i.e. indel columns, are skipped).
        # Only these two maps are needed by check_loci_in_read(); the four
        # inverse/chromosome-indexed dicts previously built here were unused.
        chrm1_to_refread0 = dict(zip(range(r.reference_start+1, r.reference_end+1),
                                     range(r.reference_end - r.reference_start)))
        refread0_to_varread0 = dict(
            [(chrm1_to_refread0[chrm0+1], read0) for read0, chrm0 in r.aligned_pairs if
             None not in (read0, chrm0)])
        return chrm1_to_refread0, refread0_to_varread0

    alleles_per_loci_per_frag = {}
    for fragID in spanning_frags:
        r1 = these_reads[fragID, True]
        r2 = these_reads[fragID, False]
        # check alignment quality is adequate for both reads of the pair
        if r1.mapq < minMappingQuality or r2.mapq < minMappingQuality:
            alleles_per_loci_per_frag[fragID] = 'low quality alignment'
            continue
        # 1) b) trim out read-aligned region of chromosome for attempting to
        # apply all reported variants and recording which are present
        refseq1 = str(self.genome.sequence[r1.reference_start:r1.reference_end].tostring())
        refseq2 = str(self.genome.sequence[r2.reference_start:r2.reference_end].tostring())
        # 2) a) per-read position mapping dictionaries
        chrm1_to_refread0_r1, refread0_to_varread0_r1 = _position_maps(r1)
        chrm1_to_refread0_r2, refread0_to_varread0_r2 = _position_maps(r2)
        # 2) b) collect polymorphic loci with alleles potentially in this
        # fragment; confirm at least 2 polymorphic loci are spanned
        these_pos0 = [pos0 for pos0, rIDs in these_reads_bypos0_indel_offsets.items() if
                      ((fragID, True) in rIDs) or ((fragID, False) in rIDs)]
        if len(these_pos0) < 2:
            print(
                '*** problem with this read: spans {} variants ***'.format(
                len(these_pos0))
            )
        # Collect the adjusted strings where indels occurred (omit the
        # preceding, identical character) for this fragment
        check_loci_pos1 = sorted(
            [(pos1, these_allele_strings) for pos1, these_allele_strings in
             corrected_indel_alleles.items() if
             pos1-1 in these_pos0])
        # 5) apply SNPs and indels to each read's aligned region of the
        # chromosome and compare with the read sequence, restricting each
        # read to the loci it actually covers
        check_loci_pos1_use = [(pos1, alleles) for pos1, alleles in check_loci_pos1 if
                               pos1 in chrm1_to_refread0_r1]
        alleles_per_loci_r1 = self.check_loci_in_read(check_loci_pos1_use,
                                                      refseq1,
                                                      refread0_to_varread0_r1,
                                                      chrm1_to_refread0_r1,
                                                      r1)
        check_loci_pos1_use = [(pos1, alleles) for pos1, alleles in check_loci_pos1 if
                               pos1 in chrm1_to_refread0_r2]
        alleles_per_loci_r2 = self.check_loci_in_read(check_loci_pos1_use,
                                                      refseq2,
                                                      refread0_to_varread0_r2,
                                                      chrm1_to_refread0_r2,
                                                      r2)
        # There shouldn't now be any 'empty' results: all polymorphic loci
        # should report either 'no mutation', 'noisy ...' or the allele.
        # (Python 2 dict items concatenation)
        alleles_per_loci_per_frag[fragID] = dict(alleles_per_loci_r1.items() +
                                                 alleles_per_loci_r2.items())
    return(alleles_per_loci_per_frag)
def check_within_reads(self, spanning_reads, corrected_indel_alleles, these_reads, these_reads_bypos0_indel_offsets, minMappingQuality = 60):
    '''
    Check coincidence of alleles on single reads from pooled gDNA samples
    Reads may be from paired-end fragments

    spanning_reads -- (fragID, is_read1) pairs for reads spanning >= 2 loci
    corrected_indel_alleles -- {pos1: allele strings} with indel alleles
        already stripped of their shared leading character
    these_reads -- {(fragID, is_read1): aligned read} (pysam AlignedSegment
        assumed -- TODO confirm)
    these_reads_bypos0_indel_offsets -- {pos0: {(fragID, is_read1): read}}
    minMappingQuality -- minimum mapq for a read to be analysed

    Returns {(fragID, is_read1): per-locus result dict, or a status string
    for low quality alignments}.
    '''
    alleles_per_loci_per_read = {}
    for fragID, is_read1 in spanning_reads:
        r = these_reads[fragID, is_read1]
        # Check alignment quality is adequate
        if r.mapq < minMappingQuality:
            alleles_per_loci_per_read[(fragID, is_read1)] = 'low quality alignment'
            continue
        # 1) b) trim out read-aligned region of chromosome for attempting to
        # apply all reported variants and recording which are present.
        refseq = str(self.genome.sequence[r.reference_start:r.reference_end].tostring())
        # 2) a) convert base-1 chromosome index of variant to base-0 read as
        # aligned to chromosome forward strand ==> a position mapping
        # dictionary chrm1_to_refread0.
        # r.reference_start, r.reference_end form a python slice as prepared
        # by pySAM (reference_end points one past the last aligned residue),
        # not an inclusive list of indexes.
        # r.qstart not necessary here because this is the 'ref read'; r.qstart
        # is accounted for by using r.aligned_pairs below
        # (non-0 r.qstart not tested)
        chrm1_to_refread0 = dict(zip(range(r.reference_start+1, r.reference_end+1),
                                     range(r.reference_end - r.reference_start)))
        # Using the pysam-supplied .aligned_pairs, e.g.:
        #   (22, 4015415),
        #   (23, None),     <== indel column: omitted from the dict
        #   (24, 4015416),
        # Only refread0 -> varread0 is needed by check_loci_in_read(); the
        # three inverse/chromosome-indexed dicts previously built here were
        # never used and have been removed.
        refread0_to_varread0 = dict(
            [(chrm1_to_refread0[chrm0+1], read0) for read0, chrm0 in r.aligned_pairs if
             None not in (read0, chrm0)])
        # 2) b) collect polymorphic loci with alleles potentially in this read.
        # Can be multiple alleles per locus.
        # Confirm at least 2 polymorphic loci are spanned by this read.
        these_pos0 = [pos0 for pos0, rIDs in these_reads_bypos0_indel_offsets.items() if (fragID, is_read1) in rIDs]
        ### reads always span a maximum of two variants even when total to check is >>2
        ### more than two screw up check_loci_in_read() but never seems to happen
        if len(these_pos0) < 2:
            print(
                '*** problem with this read: spans {} variants ***'.format(
                len(these_pos0))
            )
        # Collect the adjusted strings where indels occurred (omit the
        # preceding, identical character) for this read.
        check_loci_pos1 = sorted(
            [(pos1, these_allele_strings) for pos1, these_allele_strings in
             corrected_indel_alleles.items() if
             pos1-1 in these_pos0])
        # 5) apply SNPs and indels to aligned region of chromosome and compare
        # with the query read sequence
        alleles_per_loci = self.check_loci_in_read(check_loci_pos1,
                                                   refseq,
                                                   refread0_to_varread0,
                                                   chrm1_to_refread0,
                                                   r)
        # there shouldn't now be any 'empty' results: all polymorphic loci
        # should report either 'no mutation', 'noisy ...' or the allele.
        alleles_per_loci_per_read[fragID, is_read1] = alleles_per_loci
    return(alleles_per_loci_per_read)
def checkAlignments(self):
    '''
    parse BAM files checking for variants on same read or read pair (sequenced fragment)
    These notes are 'draft' :-)
    |-----template_length,query_length---|
    -R1---\
    *============*======
    \---R2- if r.is_read2, query_length; pos
    if r.is_read1: r.template_length + r.query_length == r.pnext - r.reference_start
    really same as r.is_read1?
    if r.is_read2: r.template_length + r.query_length == r.pnext - r.reference_start
    insert end position
    r.reference_start + r.template_length
    length of this read
    r.query_length
    length of other read
    r.template_length - (r.pnext - r.reference_start)
    Track each variant in a read from start to finish building a 1-to-1 mapping of
    positions. This makes checking presence or absence variants much easier.
    0) with potentially linked variants (pos,(r,q)) and many read/read pairs in hand.
    1) a) collect single reads and fragments to be analysed based on spanning of at
    least two clustered variants. Then iterate through reads:
    1) b) trim out read-aligned region of chromosome for attempting to apply all
    reported variants and recording which are present.
    2) a) convert base-1 chromosome index of variant to base-0 read as aligned to
    chromosome forward strand ==> a position mapping dictionary chrm1_to_refread0.
    2) b) collect variants with alleles potentially in this read.
    3) adjust method (position) of indels as reported: position is where indel
    happened and one string is length zero.
    4) make another mapping dict refread0_to_chrm1: chrm1_to_refread0 and
    refread0_to_chrm1 need to be updated as refread0 gets deletions (as varread0
    gets insertions)
    5) apply SNPs and indels to query ORF sequence:
    thisORFseq_variant,
    refread0_2_varread0,
    varread0_2_refread0 = applyVariantsGetMappings(thisORFseq,
    chrm1_to_refread0,
    check_variants)
    Variants reported relative to ref read can be converted to position in variant
    read and vice versa.

    Requires self.clusters (collectAdjacentPolymorphisms), self.pooled_variants
    (parsePooledVCF), self.genome, self.VCF_paths and self.alignment_paths.
    Populates self.polymorphism_linkage_bypop.
    '''
    # NB:
    # pysam is pos0
    # VCFs are pos1
    # usable-read counts per (indel-offset) variant position;
    # really inserts because read pairs share ID
    num_reads_by_pos1_indel_offsets = {}
    # by VCF, by chromosome, by cluster, by [reads and/or fragment]
    polymorphism_linkage_bypop = {}
    # clusters[VCF][chromosome]
    # assumes lists of VCFs and BAMs correspond by name: not checked
    VCF2BAM = dict(zip(sorted(self.VCF_paths),sorted(self.alignment_paths)))
    for VCF,chromosomes in self.clusters.items():
        polymorphism_linkage_bypop[VCF] = {}
        for chromosome,near_vars in chromosomes.items():
            # Currently baga.CollectData.Genome only supports single chromosome
            # genomes.
            # This test should be run separately for each reference genome
            # (chromosome) mapped against
            if chromosome != self.genome.id:
                polymorphism_linkage_bypop[VCF][chromosome] = {}
                print(
                    "Skipping chromosome {} present in BAM because "
                    "it doesn't match supplied genome: {}".format(
                    chromosome, self.genome.id))
                continue
            # 0) with potentially linked variants (pos,(r,q)) and many read/read
            # pairs in hand . . .
            print('Collecting variants for {} in {}'.format(chromosome, VCF))
            ## lists to tuples for use as keys and +1 for indels
            # # for use as key to check results at the end
            near_vars_tuples = []
            # for use in analysing sets of reads
            near_vars_indel_offsets = []
            # for collecting all the required reads
            to_get_reads_indel_offsets = []
            # indel allele strings with shared leading character removed
            corrected_indel_alleles = {}
            # for getting distance beyond a potential deletion that read needs to span
            # (not fully implemented yet?)
            indel_max_allele_lengths = {'del':{},'ins':{}}
            for these_vars in near_vars:
                these_vars_indel_offsets = []
                for pos1 in these_vars:
                    these_allele_strings = self.pooled_variants[VCF][chromosome][pos1]['variants']
                    longest_length = max(map(len,these_allele_strings))
                    if longest_length > 1:
                        # an indel: treat as occurring at pos1+1 and strip the
                        # shared leading character from each allele string
                        to_get_reads_indel_offsets += [pos1+1]
                        these_vars_indel_offsets += [pos1+1]
                        corrected_indel_alleles[pos1+1] = [v[1:] for v in these_allele_strings]
                        if len(these_allele_strings[0]) > 1:
                            # deletion: bases beyond pos1 a read must also span
                            indel_max_allele_lengths['del'][pos1+1] = len(
                                these_allele_strings[0]
                                )-1
                        if len(these_allele_strings[0]) < longest_length:
                            indel_max_allele_lengths['ins'][pos1+1] = longest_length-1
                    else:
                        # a SNP: position used as reported
                        to_get_reads_indel_offsets += [pos1]
                        these_vars_indel_offsets += [pos1]
                        corrected_indel_alleles[pos1] = these_allele_strings
                near_vars_indel_offsets += [these_vars_indel_offsets]
                near_vars_tuples += [tuple(these_vars)]
            # get the reads out of the BAM file
            reads = _pysam.Samfile(VCF2BAM[VCF])
            # all variants positions
            these_reads_bypos0_indel_offsets = {}
            these_reads = {}
            # Fetch all reads spanning all variant positions for this sample
            # => some variant positions will span reads from chromosomes with
            # deletions (which should be indicated in called variants).
            # -> i) indels will be treated as occurring at +1 their reported
            # positions which must be accounted for when collecting reads.
            # -> ii) deletions will cause alignment length increase, insertions
            # alignments to ref will decrease in length.
            # => sometimes a fragment, identifiable by r.qname, appears twice in the
            # over the region of interest as r.is_read1 and also as r.is_read1.
            # -> therefore reads must be stored using fragment name (query_name) and
            # is_read1 (yes or no, if no its read 2).
            for pos1 in to_get_reads_indel_offsets:
                # a pos0 slice from a pos1 index
                reads_iter = reads.fetch( chromosome, pos1-1, pos1)
                these_reads_bypos0_indel_offsets[pos1-1] = {}
                try:
                    read_end_offset = indel_max_allele_lengths['del'][pos1]
                except KeyError:
                    # not a deletion: no extra span required
                    read_end_offset = 0
                for r in reads_iter:
                    # filter reads that don't span potential deletions. Need all of
                    # a potential deletion present within aligned region to replace
                    # in ref read.
                    # Rd: ===========ddddd==
                    # Rf: -------------V-------- <== reads must not only span
                    # V but also beyond end of
                    # ddddd like Rd here
                    if pos1 + read_end_offset <= r.reference_end:
                        # pos0 end slice == pos1 end inclusive and must be present in
                        # read alignment.
                        these_reads_bypos0_indel_offsets[pos1-1][(r.query_name,r.is_read1)] = r
                        these_reads[(r.query_name,r.is_read1)] = r
                num_reads_by_pos1_indel_offsets[pos1] = len(
                    these_reads_bypos0_indel_offsets[pos1-1])
            linkages = {}
            for these_positions,these_positions_indel_offsets in zip(near_vars_tuples,
                near_vars_indel_offsets):
                # 1) a) collect single reads and fragments to be analysed based on
                # spanning of at least two clustered variants.
                # if len(set(these_positions_indel_offsets) & \
                # set(indel_max_allele_lengths['ins'])):
                # print(
                # 'insertion at %s' % ','.join(map(str,
                # set(these_positions_indel_offsets) & \
                # set(indel_max_allele_lengths['ins'])
                # )))
                # elif len(set(these_positions_indel_offsets) & \
                # set(indel_max_allele_lengths['del'])):
                # print(
                # 'deletion at %s' % ','.join(map(str,
                # set(these_positions_indel_offsets) & \
                # set(indel_max_allele_lengths['del'])
                # )))
                # This assumes all variants are recorded in near_vars - additional low
                # confidence/noise will render reads unusable . . .
                # Not if e.g. SNP prevents recognition of mutated ref segment but
                # also != without the mutation.
                # Collect read pairs spanning at least two variants (potential linkage
                # info; either in a single read or both each end of a fragment)
                # sort locus and read info per fragment
                read_at_locus_per_frag = _defaultdict(list)
                for pos1 in these_positions_indel_offsets:
                    for fragID, is_read1 in these_reads_bypos0_indel_offsets[pos1-1]:
                        read_at_locus_per_frag[fragID] += [(pos1,is_read1)]
                read_at_locus_per_frag = dict(read_at_locus_per_frag)
                # collect all reads spanning two or more loci (read 1 or 2)
                spanning_reads = []
                spanning_frags = []
                # NOTE(review): 'reads' here shadows the Samfile opened above;
                # harmless because the Samfile is not used again this
                # iteration, but worth renaming
                for fragment, reads in read_at_locus_per_frag.items():
                    read1s = set()
                    read2s = set()
                    for locus,isread1 in reads:
                        if isread1:
                            read1s.add(locus)
                        else:
                            read2s.add(locus)
                    if len(read1s) > 1:
                        spanning_reads += [(fragment,True)]
                    if len(read2s) > 1:
                        spanning_reads += [(fragment,False)]
                    # Some of these fragments will include reads with two loci
                    # themselves;
                    # Will need merging after analyses avoiding double counts of
                    # linkage etc. Need to ensure each read spans a _different_
                    # polymorphism if collecting a fragment.
                    if len(read1s - read2s) >= 1 and len(read2s - read1s) >= 1:
                        spanning_frags += [fragment]
                spanning_reads = set(spanning_reads)
                spanning_frags = set(spanning_frags)
                print('{}:\n\tpositions: {} ({} bp):\n'
                    '\treads spanning >=2 pos {};\n'
                    '\tread pairs spanning >=2 pos {};\n'
                    '\twith >=2 in a read {}'.format(
                    VCF,
                    ','.join(map(str,these_positions)),
                    these_positions[-1]-these_positions[0],
                    len(spanning_reads),
                    len(spanning_frags),
                    len(set([a for a,b in spanning_reads]) & spanning_frags)))
                ## check for linkage of alleles spanned by single reads
                alleles_per_loci_per_read = self.check_within_reads(
                    spanning_reads,
                    corrected_indel_alleles,
                    these_reads,
                    these_reads_bypos0_indel_offsets)
                ## check for linkage of alleles spanned by 2 reads of same fragments
                alleles_per_loci_per_frag = self.check_within_frags(
                    spanning_frags,
                    corrected_indel_alleles,
                    these_reads,
                    these_reads_bypos0_indel_offsets)
                ## check whether fragment-level counts and read-level counts include same variant
                ## merge results . . . not implemented yet <==============
                overlap = set(
                    [a[0] for a in alleles_per_loci_per_read if isinstance(a,tuple)]) & \
                    set(alleles_per_loci_per_frag)
                overlap2 = set(
                    [a for a in alleles_per_loci_per_read if \
                    isinstance(a,tuple) and \
                    a[0] in alleles_per_loci_per_frag])
                if len(overlap) > 0:
                    print(
                        '*** overlap between within read and fragment linkage reports ***'
                        )
                    print('{}\n{}'.format(len(overlap),len(overlap2)))
                #print('{}\n{}\n{}'.format(overlap,overlap2,alleles_per_loci_per_frag))
                ## at this stage reads are saved with keys as just the fragment name (ID)
                ## if on a single read, or as tuple with is_read1 if on a fragment
                ## ==> should these have same type of key here? add is_read1 to single reads?
                # (Python 2 dict items concatenation)
                linkages[these_positions] = dict(
                    alleles_per_loci_per_read.items() + \
                    alleles_per_loci_per_frag.items())
            polymorphism_linkage_bypop[VCF][chromosome] = linkages
    self.polymorphism_linkage_bypop = polymorphism_linkage_bypop
def compare_allele_freqs(self, reads, VCF, chromosome):
    '''Count allele frequencies with selected reads for tabulation

    Tallies, per variant position, how many of the supplied reads carry a
    called allele, then prints a comparison of the read-counted frequency
    against the frequency recorded in the pooled VCF.

    Parameters
    ----------
    reads : dict
        maps read identifier (fragment ID str, or (fragment ID, is_read1)
        tuple when both mates were checked) to either a str status marker
        or a dict of {position: allele}.
    VCF, chromosome :
        keys into self.pooled_variants selecting the variant calls to
        compare against.

    Returns
    -------
    a counter mapping position -> number of reads carrying a called allele.
    '''
    some_freqs = []
    num_reads = 0
    for oligo,alleles in reads.items():
        if isinstance(alleles,str):
            # read not mapped with high enough quality (MQ = 60)
            # ==> do not include in total read count
            continue
        if 'noisy segment: undetermined' in alleles:
            # alignment at one or other position has low position score or alignment is ambiguous
            # ==> do not include in total read count
            continue
        if isinstance(oligo,tuple):
            # tuple keys carry (fragment ID, is_read1); plain keys are the ID itself
            fragID = oligo[0]
        else:
            fragID = oligo
        # NOTE(review): fragID is computed but not used below — possibly a leftover
        num_reads += 1
        for pos1,allele in alleles.items():
            # correct for differences in indel position reporting ... 4015416 includes insertions but is not at pos1+1 ... expected all indels to be pos1+1
            if pos1 in self.pooled_variants[VCF][chromosome]:
                use_pos1 = pos1
            else:
                use_pos1 = pos1 - 1
            # correct how indels are reported for comparisons to VCF info
            these_alleles = self.pooled_variants[VCF][chromosome][use_pos1]['variants']
            longest_length = max(map(len,these_alleles))
            if longest_length > 1:
                # indel alleles in the VCF include a shared leading reference
                # base; strip it so alleles match how reads report them
                these_alleles = [a[1:] for a in these_alleles]
            if allele in these_alleles:
                some_freqs += [use_pos1]
    # _Counter is presumably collections.Counter aliased at module level -- TODO confirm
    some_freqs = _Counter(some_freqs)
    for pos1,b in some_freqs.items():
        # 40 appears to be the number of haplotypes in the pooled VCF
        # (frequencies are expressed out of 40) -- TODO confirm
        freq_at_this_locus_in_reads = int(round(40*(b/float(num_reads))))
        # count non-zero genotype entries (Python 2: filter returns a list)
        freq_at_this_locus_in_calls = len(
                filter(
                    lambda x: x != 0,
                    self.pooled_variants[VCF][chromosome][pos1]['GT']
                    ))
        print('At {}, counted in reads: {}/40, called {}/40'.format(
                pos1,
                freq_at_this_locus_in_reads,
                freq_at_this_locus_in_calls))
    return(some_freqs)
def tabulateResults(self, min_spanning_read_depth = 20):
    '''prepare tables containing linkage information.
    One table compares frequencies of each allele included (which checks accuracy of
    counting method). The column headers are:
        "Chromosome position"
        "Observed Frequencies"
        "Called Frequencies"
    The other table contains biological info i.e., which bit of chromosome, its function,
    groups of linked variants (only pairs in this case so row per pair). The column
    headers are:
        "Variant pair positions"
        "Total reads spanning pair"
        "Reads variant at both positions"
        "Reads variant at first position only"
        "Reads variant at second position only"
        "Annotations"

    Parameters
    ----------
    min_spanning_read_depth : int
        minimum number of informative reads (or read pairs) spanning a
        cluster of variant positions required before the cluster is
        reported in the linkage table.

    Results are stored in self.tables_freqcheck and self.tables_linkage.
    '''
    # alleles that do NOT count as variants when classifying a read
    no_variants = set(('no mutation','noisy segment: undetermined'))
    tables_freqcheck = {}
    tables_linkage = {}
    linkages_undetermined = {}
    some_unaccounted = {}
    for VCF,chromosomes in self.polymorphism_linkage_bypop.items():
        for chromosome, positions in chromosomes.items():
            # Currently baga.CollectData.Genome only supports single chromosome
            # genomes.
            # This test should be run separately for each reference genome
            # (chromosome) mapped against
            if chromosome != self.genome.id:
                # not blanking for now - not sure why/if this was necessary
                # self.polymorphism_linkage_bypop[VCF][chromosome] = {}
                print(
                        "Skipping chromosome {} present in BAM because "
                        "it doesn't match supplied genome: {}. This analysis "
                        "should be run separately for each reference genome "
                        "(chromosome) mapped against".format(chromosome, self.genome.id))
                continue
            table_freqcheck = []
            table_linkage = []
            linkages_undetermined[VCF] = {}
            for these_positions,reads in positions.items():
                # classify each read (or read pair) spanning this cluster of
                # nearby positions (tests for > 2 variants close enough to be
                # tested by e.g. 500 bp insert fragments)
                no_polymorphisms = set()
                one_polymorphism = set()
                linked_polymorphism = {}
                low_quality_alignment = set()
                too_noisy_to_test_linkage = set()
                # "oligo" key is each str of fragment ID if only one read checked
                # or tuple of id, is_read1 if both reads in a pair were checked
                for oligo,alleles in reads.items():
                    if alleles == 'low quality alignment':
                        low_quality_alignment.add(oligo)
                    elif len([v for v in alleles.values() if \
                            'noisy' not in v]) < 2:
                        # fewer than two clean positions: cannot test linkage
                        too_noisy_to_test_linkage.add(oligo)
                    elif all([v == 'no mutation' for v in alleles.values() if \
                            'noisy' not in v]):
                        no_polymorphisms.add(oligo)
                    elif len([v for v in alleles.values() if \
                            v != 'no mutation' and \
                            'noisy' not in v]) == 1:
                        one_polymorphism.add(oligo)
                    else:
                        # two or more clean variant positions: candidate linkage
                        linked_polymorphism[oligo] = dict(
                                [(pos1,v) for pos1,v in alleles.items() if \
                                v != 'no mutation' and \
                                'noisy' not in v]
                                )
                print('Number of positions: {}'.format(len(these_positions)))
                if len(linked_polymorphism) > 0 and len(these_positions) > 2:
                    print(max(map(len,linked_polymorphism.values())))
                    if max(map(len,linked_polymorphism.values())) > 2:
                        print('Three way linkage: not implemented')
                print('Pop: {}'.format(VCF))
                print('\treads: {}'.format(len(reads)))
                print('\tpositions: {}'.format(','.join(map(str,these_positions))))
                print('\tlinked: {}'.format(len(linked_polymorphism)))
                print('\tsingles: {}'.format(len(one_polymorphism)))
                print('\tno mutations: {}'.format(len(no_polymorphisms)))
                print('\t(not mapped: {}; too noisy: {})'.format(
                        len(low_quality_alignment),
                        len(too_noisy_to_test_linkage)))
                #### linkage table ####
                # divide up read findings by clusters of loci ==> rows
                ## variants at this stage are not always just pairs, some of these frags have >2 <== WHY?
                ## i.e. 'by_pair' is a wrong assumption
                ## would actually be better to scale up to arbitrary number of adjacent variants
                by_cluster = _defaultdict(list)
                for loci in reads.values():
                    if isinstance(loci,dict):
                        variant_positions = tuple(sorted(loci))
                        muts = map(loci.get,sorted(loci))
                        by_cluster[variant_positions] += [muts]
                    else:
                        # could count unusable reads here also . . .
                        pass
                ## minimum read depth limit applied here:
                by_cluster = dict([(cluster,combos) for cluster,combos in by_cluster.items() if \
                        len(combos) >= min_spanning_read_depth])
                for cluster,combos in by_cluster.items():
                    # get ORF annotation(s) for this pair
                    # Not implemented: would need baga.CollectData.Genome to retain more
                    # information.
                    ## could optionally provide a gbk or DL via accession for locus info?
                    annotation = 'not available'
                    ABlinked = 0
                    Aonly = 0
                    Bonly = 0
                    wild_type = 0
                    # 1 or both variants noisy so linkage cannot be determined
                    linkage_undetermined = 0
                    #### this bit needs updating to handle more than just pairs . . .
                    if len(cluster) > 2:
                        e = 'More than two alleles per read or fragment not implemented! '\
                                'Raise an issue at github.com/daveuu/baga if you need this feature. '\
                                'Polymorphisms in your data are sufficiently close for >2 to be spanned '\
                                'by single reads/fragments.'
                        raise NotImplementedError(e)
                    for a,b in combos:
                        if a not in no_variants and b not in no_variants:
                            ABlinked += 1
                        elif 'noisy segment: undetermined' in (a,b):
                            linkage_undetermined += 1
                        elif a not in no_variants:
                            Aonly += 1
                        elif b not in no_variants:
                            Bonly += 1
                        else:
                            # BUG FIX: was 'wild_type += 0', which never counted
                            # wild-type reads, deflating the total below
                            wild_type += 1
                    table_linkage += [[ # "Variant pair pos"
                                        ', '.join(map(str,sorted(cluster))),
                                        # "Total infm reads spanning"
                                        ABlinked + Aonly + Bonly + wild_type,
                                        # "variant at both"
                                        ABlinked,
                                        # "variant at first"
                                        Aonly,
                                        # "variant at second"
                                        Bonly,
                                        # "Annotations"
                                        annotation]]
                    linkages_undetermined[VCF][tuple(sorted(cluster))] = linkage_undetermined
                #### frequency check table ####
                # How can mismatches be explained?
                # Reads with low alignment scores (<60), ambiguous indel alignments?
                # "Chromosome position", "Observed Frequencies", "Called Frequencies"
                some_freqs = self.compare_allele_freqs(reads, VCF, chromosome)
                for pos1,b in some_freqs.items():
                    # correct for uncheckable reads from total read count
                    freq_at_this_locus_in_reads = int(round(40*(b/float(len(reads) - \
                            len(low_quality_alignment) - \
                            len(too_noisy_to_test_linkage)))))
                    # BUG FIX: previously referenced an undefined module-level
                    # 'pooled_variants' and hard-coded chromosome 'NC_002516.2';
                    # use this instance's data for the current chromosome
                    freq_at_this_locus_in_calls = len(filter(lambda x: x != 0,
                            self.pooled_variants[VCF][chromosome][pos1]['GT']))
                    table_freqcheck += [[ # "Chromosome position"
                                          pos1,
                                          # "Observed Frequencies"
                                          freq_at_this_locus_in_reads,
                                          # "Called Frequencies"
                                          freq_at_this_locus_in_calls]]
                # sanity check: every read should fall into exactly one class
                if len(reads) != len(linked_polymorphism) + \
                        len(one_polymorphism) + \
                        len(no_polymorphisms) + \
                        len(low_quality_alignment) + \
                        len(too_noisy_to_test_linkage):
                    print('SOME READS UNACCOUNTED FOR')
                    # BUG FIX: previously unioned a dict with sets (TypeError),
                    # listed linked_polymorphism twice and omitted
                    # no_polymorphisms; union the key sets of all five classes
                    some_unaccounted[VCF,these_positions] = sorted(set(reads) - (
                            set(linked_polymorphism) | \
                            one_polymorphism | \
                            no_polymorphisms | \
                            low_quality_alignment | \
                            too_noisy_to_test_linkage))
            tables_freqcheck[VCF] = table_freqcheck
            tables_linkage[VCF] = table_linkage
    self.tables_freqcheck = tables_freqcheck
    self.tables_linkage = tables_linkage
def writeTables(self, freq_table_name = 'freq_table.csv',
                     linkage_table_name = 'linkage_table.csv'):
    '''make csv tables containing linkage information

    Parameters
    ----------
    freq_table_name : str
        path of the CSV comparing read-counted allele frequencies with
        frequencies called in the VCFs (from self.tables_freqcheck).
    linkage_table_name : str
        path of the CSV summarising linkage between nearby polymorphisms
        (from self.tables_linkage).
    '''
    # frequencies of each allele
    headers_freqcheck = ["Sample",
                         "Chromosome position",
                         "Observed Frequencies",
                         "Called Frequencies"]
    # biological info i.e., which bit of chromosome, its function,
    # groups of linked variants (only pairs in this case so row per pair)
    headers_linkage = [ "Sample",
                        "Variant pair positions",
                        "Total informative reads spanning pair",
                        "Reads variant at both positions",
                        "Reads variant at first position only",
                        "Reads variant at second position only",
                        "Annotations"]
    with open(freq_table_name, 'w') as fout:
        print('Writing table of observed frequencies (reads counted) with frequencies '
                'provided in the VCFs to {}'.format(freq_table_name))
        fout.write(','.join(headers_freqcheck)+'\n')
        for VCF, clusters in self.tables_freqcheck.items():
            # sample name is the VCF file name without leading directories
            sample = VCF.split(_os.path.sep)[-1]
            for pos, observed_freqs, called_freqs in clusters:
                # BUG FIX: the format string previously contained seven
                # placeholders ('"{}","{}",{},{},{},{},"{}"\n') but only four
                # arguments, raising IndexError on the first row written
                row = '"{}","{}",{},{}\n'.format(sample, pos,
                                                 observed_freqs,
                                                 called_freqs)
                fout.write(row)
    with open(linkage_table_name, 'w') as fout:
        print('Writing table of linkage status of nearby polymorphisms to '
                '{}'.format(linkage_table_name))
        fout.write(','.join(headers_linkage)+'\n')
        for VCF, clusters in self.tables_linkage.items():
            sample = VCF.split(_os.path.sep)[-1]
            for pos, total_reads, reads_w_both, reads_w_A, reads_w_B, annotation in clusters:
                # skip clusters with no informative reads at all
                if sum([total_reads, reads_w_both, reads_w_A, reads_w_B]) == 0:
                    print('Reads spanning polymorphisms at {} too noisy for inference ({})'.format(pos, sample))
                    continue
                row = '"{}","{}",{},{},{},{},"{}"\n'.format(sample, pos, total_reads,
                                                            reads_w_both,
                                                            reads_w_A,
                                                            reads_w_B,
                                                            annotation)
                fout.write(row)
def doLinkageCheck(self, dist = 1000):
    '''Run the full linkage-testing pipeline.

    Parses the pooled VCFs, collects polymorphisms within `dist` bp of each
    other, ensures every BAM has an index, checks read-reference alignments,
    then tabulates and writes the result tables.
    '''
    self.parsePooledVCF()
    print('Collecting nearby variants')
    self.collectAdjacentPolymorphisms(dist = dist)
    # make sure a non-empty '.bai' index exists for each BAM before reading
    for bam_path in self.alignment_paths:
        index_path = _os.path.extsep.join([bam_path, 'bai'])
        have_index = _os.path.exists(index_path) and _os.path.getsize(index_path) > 0
        if not have_index:
            print('indexing {}'.format(bam_path))
            _pysam.index(bam_path)
    print('Checking read-reference alignments')
    self.checkAlignments()
    self.tabulateResults()
    self.writeTables()
# Script entry point: main() is expected to be defined elsewhere in this
# module (not visible in this excerpt).
if __name__ == '__main__':
    main()
| daveuu/baga | CallVariants.py | Python | gpl-3.0 | 181,875 | [
"BWA",
"pysam"
] | 50d04d94802b88193ab7e23a203726525e7bbe8e6bfbfb44d8cdbd2adb1100ed |
import numpy as np
from spherical_functions import LM_total_size
from .. import ModesTimeSeries
from .. import Inertial
class AsymptoticBondiData:
    """Class to store asymptotic Bondi data

    This class stores time data, along with the corresponding values of psi0 through psi4 and sigma.
    For simplicity, the data are stored as one contiguous array. That is, *all* values are stored
    at all times, even if they are zero, and all Modes objects are stored with ell_min=0, even when
    their spins are not zero.

    The single contiguous array is then viewed as 6 separate ModesTimeSeries objects, which enables
    them to track their spin weights, and provides various convenient methods like `eth` and
    `ethbar`; `dot` and `ddot` for time-derivatives; `int` and `iint` for time-integrations; `norm`
    to take the norm of a function over the sphere; `bar` for conjugation of the functions (which is
    different from just conjugating the mode weights); etc. It also handles algebra correctly --
    particularly addition (which is disallowed when the spin weights differ) and multiplication
    (which can be delicate with regards to the resulting ell values).

    This may lead to some headaches when the user tries to do things that are disabled by Modes
    objects. The goal is to create headaches if and only if the user is trying to do things that
    really should never be done (like conjugating mode weights, rather than the underlying function;
    adding modes with different spin weights; etc.). Please open issues for any situations that
    don't meet this standard.

    This class also provides various convenience methods for computing things like the mass aspect,
    the Bondi four-momentum, the Bianchi identities, etc.
    """

    def __init__(self, time, ell_max, multiplication_truncator=sum, frameType=Inertial):
        """Create new storage for asymptotic Bondi data

        Parameters
        ==========
        time: int or array_like
            Times at which the data will be stored. If this is an int, an empty array of that size
            will be created. Otherwise, this must be a 1-dimensional array of floats.
        ell_max: int
            Maximum ell value to be stored
        multiplication_truncator: callable [defaults to `sum`, even though `max` is nicer]
            Function to be used by default when multiplying Modes objects together. See the
            documentation for spherical_functions.Modes.multiply for more details. The default
            behavior with `sum` is the most correct one -- keeping all ell values that result -- but
            also the most wasteful, and very likely to be overkill. The user should probably always
            use `max`. (Unfortunately, this must remain an opt-in choice, to ensure that the user
            is aware of the situation.)
        """
        import functools
        if np.ndim(time) == 0:
            # Assume this is just the size of the time array; construct an empty array
            time = np.empty((time,), dtype=float)
        elif np.ndim(time) > 1:
            raise ValueError(f"Input `time` parameter must be an integer or a 1-d array; it has shape {time.shape}")
        if time.dtype != float:
            raise ValueError(f"Input `time` parameter must have dtype float; it has dtype {time.dtype}")
        # Pre-bind the common constructor arguments so all six fields are
        # built consistently
        ModesTS = functools.partial(ModesTimeSeries, ell_max=ell_max, multiplication_truncator=multiplication_truncator)
        # One contiguous block; axis 0 indexes the six fields psi0..psi4, sigma
        shape = [6, time.size, LM_total_size(0, ell_max)]
        self.frame = np.array([])
        self.frameType = frameType
        self._time = time.copy()
        self._raw_data = np.zeros(shape, dtype=complex)
        # Each field is a view into _raw_data carrying its own spin weight
        self._psi0 = ModesTS(self._raw_data[0], self._time, spin_weight=2)
        self._psi1 = ModesTS(self._raw_data[1], self._time, spin_weight=1)
        self._psi2 = ModesTS(self._raw_data[2], self._time, spin_weight=0)
        self._psi3 = ModesTS(self._raw_data[3], self._time, spin_weight=-1)
        self._psi4 = ModesTS(self._raw_data[4], self._time, spin_weight=-2)
        self._sigma = ModesTS(self._raw_data[5], self._time, spin_weight=2)

    @property
    def time(self):
        """Array of times at which the data are stored."""
        return self._time

    @time.setter
    def time(self, new_time):
        # Assign in place so the ModesTimeSeries views keep sharing this array.
        # (NOTE: Python ignores return values from property setters.)
        self._time[:] = new_time
        return self._time

    # Common aliases for the time array (u: retarded time; t: coordinate time)
    u = time
    t = time

    @property
    def n_times(self):
        """Number of stored time steps."""
        return self.time.size

    @property
    def n_modes(self):
        """Number of stored (ell, m) modes per field per time step."""
        return self._raw_data.shape[-1]

    @property
    def ell_min(self):
        """Smallest ell value stored (always 0 by construction)."""
        return self._psi2.ell_min

    @property
    def ell_max(self):
        """Largest ell value stored."""
        return self._psi2.ell_max

    @property
    def LM(self):
        """Array of (ell, m) index pairs for the stored modes."""
        return self.psi2.LM

    @property
    def sigma(self):
        """Shear, as a spin-weight +2 ModesTimeSeries."""
        return self._sigma

    @sigma.setter
    def sigma(self, sigmaprm):
        # In-place assignment keeps the view into _raw_data intact
        self._sigma[:] = sigmaprm
        return self.sigma

    @property
    def psi4(self):
        """Weyl scalar psi4, as a spin-weight -2 ModesTimeSeries."""
        return self._psi4

    @psi4.setter
    def psi4(self, psi4prm):
        self._psi4[:] = psi4prm
        return self.psi4

    @property
    def psi3(self):
        """Weyl scalar psi3, as a spin-weight -1 ModesTimeSeries."""
        return self._psi3

    @psi3.setter
    def psi3(self, psi3prm):
        self._psi3[:] = psi3prm
        return self.psi3

    @property
    def psi2(self):
        """Weyl scalar psi2, as a spin-weight 0 ModesTimeSeries."""
        return self._psi2

    @psi2.setter
    def psi2(self, psi2prm):
        self._psi2[:] = psi2prm
        return self.psi2

    @property
    def psi1(self):
        """Weyl scalar psi1, as a spin-weight +1 ModesTimeSeries."""
        return self._psi1

    @psi1.setter
    def psi1(self, psi1prm):
        self._psi1[:] = psi1prm
        return self.psi1

    @property
    def psi0(self):
        """Weyl scalar psi0, as a spin-weight +2 ModesTimeSeries."""
        return self._psi0

    @psi0.setter
    def psi0(self, psi0prm):
        self._psi0[:] = psi0prm
        return self.psi0

    def copy(self):
        """Return a deep copy of this object."""
        import copy
        # Build a fresh instance, then overwrite its state with a deep copy of
        # ours so all arrays and views are duplicated rather than shared
        new_abd = type(self)(self.t, self.ell_max)
        state = copy.deepcopy(self.__dict__)
        new_abd.__dict__.update(state)
        return new_abd

    def interpolate(self, new_times):
        """Return a new object with all fields interpolated to `new_times`."""
        new_abd = type(self)(new_times, self.ell_max)
        new_abd.frameType = self.frameType
        # interpolate waveform data
        new_abd.sigma = self.sigma.interpolate(new_times)
        new_abd.psi4 = self.psi4.interpolate(new_times)
        new_abd.psi3 = self.psi3.interpolate(new_times)
        new_abd.psi2 = self.psi2.interpolate(new_times)
        new_abd.psi1 = self.psi1.interpolate(new_times)
        new_abd.psi0 = self.psi0.interpolate(new_times)
        # interpolate frame data if necessary (frame is empty unless set)
        if self.frame.shape[0] == self.n_times:
            import quaternion
            new_abd.frame = quaternion.squad(self.frame, self.t, new_times)
        return new_abd

    # These relative imports are evaluated inside the class body, so each
    # imported function is bound as a method of this class.
    from .from_initial_values import from_initial_values

    from .constraints import (
        bondi_constraints,
        bondi_violations,
        bondi_violation_norms,
        bianchi_0,
        bianchi_1,
        bianchi_2,
        constraint_3,
        constraint_4,
        constraint_mass_aspect,
    )

    from .transformations import transform

    from .bms_charges import (
        mass_aspect,
        bondi_rest_mass,
        bondi_four_momentum,
        bondi_angular_momentum,
        bondi_dimensionless_spin,
        bondi_boost_charge,
        bondi_CoM_charge,
        supermomentum,
    )
| moble/scri | scri/asymptotic_bondi_data/__init__.py | Python | mit | 7,236 | [
"Psi4"
] | a00148456b2e220660b74f02c08f39a70d8b554b68efb1ae5ffbfe18676e4167 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Monte Carlo Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
  import ctypes
  # RTLD_GLOBAL makes symbols from loaded shared objects globally visible,
  # presumably so TensorFlow's extension modules can resolve each other's
  # symbols -- see issue #6568
  sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.bayesflow.python.ops import monte_carlo_impl as monte_carlo_lib
from tensorflow.contrib.bayesflow.python.ops.monte_carlo_impl import _get_samples
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
distributions = distributions_lib
layers = layers_lib
monte_carlo = monte_carlo_lib
class ExpectationImportanceSampleTest(test.TestCase):
  """Tests for expectation_importance_sampler in linear space."""

  def test_normal_integral_mean_and_var_correctly_estimated(self):
    num_draws = int(1e6)
    with self.test_session():
      target_loc = constant_op.constant([-1.0, 1.0], dtype=dtypes.float64)
      proposal_loc = constant_op.constant([0.0, 0.0], dtype=dtypes.float64)
      target_scale = constant_op.constant([0.5, 0.5], dtype=dtypes.float64)
      proposal_scale = constant_op.constant([1.0, 1.0], dtype=dtypes.float64)
      p = distributions.Normal(loc=target_loc, scale=target_scale)
      q = distributions.Normal(loc=proposal_loc, scale=proposal_scale)
      # First moment E_p[X], estimated by importance sampling from q.
      e_x = monte_carlo.expectation_importance_sampler(
          f=lambda x: x, log_p=p.log_prob, sampling_dist_q=q, n=num_draws,
          seed=42)
      # Second moment E_p[X^2].
      e_x2 = monte_carlo.expectation_importance_sampler(
          f=math_ops.square, log_p=p.log_prob, sampling_dist_q=q,
          n=num_draws, seed=42)
      stddev = math_ops.sqrt(e_x2 - math_ops.square(e_x))
      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
      # pass.
      # Convergence of mean is +- 0.003 if n = 100M
      # Convergence of stddev is +- 0.00001 if n = 100M
      self.assertEqual(p.batch_shape, e_x.get_shape())
      self.assertAllClose(p.mean().eval(), e_x.eval(), rtol=0.01)
      self.assertAllClose(p.stddev().eval(), stddev.eval(), rtol=0.02)

  def test_multivariate_normal_prob_positive_product_of_components(self):
    # Estimate P[X_1 * X_2 > 0] for X ~ p by importance sampling from q.
    num_draws = 1000
    with self.test_session():
      p = distributions.MultivariateNormalDiag(
          loc=[0.0, 0.0], scale_diag=[1.0, 1.0])
      q = distributions.MultivariateNormalDiag(
          loc=[0.5, 0.5], scale_diag=[3., 3.])

      def product_is_positive(x):
        component_product = math_ops.reduce_prod(x, reduction_indices=[-1])
        return 0.5 * (math_ops.sign(component_product) + 1.0)

      prob = monte_carlo.expectation_importance_sampler(
          f=product_is_positive, log_p=p.log_prob, sampling_dist_q=q,
          n=num_draws, seed=42)
      # p is a spherical Gaussian centered at (0, 0), so the answer is 1/2.
      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
      # pass; convergence is +- 0.004 if n = 100k.
      self.assertEqual(p.batch_shape, prob.get_shape())
      self.assertAllClose(0.5, prob.eval(), rtol=0.05)
class ExpectationImportanceSampleLogspaceTest(test.TestCase):
  """Tests for expectation_importance_sampler_logspace."""

  def test_normal_distribution_second_moment_estimated_correctly(self):
    # Check the log-space importance-sampled estimate of E_p[X^2] against the
    # analytic values [1, (2/3)^2].
    num_draws = int(1e6)
    with self.test_session():
      target_loc = constant_op.constant([0.0, 0.0], dtype=dtypes.float64)
      proposal_loc = constant_op.constant([-1.0, 1.0], dtype=dtypes.float64)
      target_scale = constant_op.constant([1.0, 2 / 3.], dtype=dtypes.float64)
      proposal_scale = constant_op.constant([1.0, 1.0], dtype=dtypes.float64)
      p = distributions.Normal(loc=target_loc, scale=target_scale)
      q = distributions.Normal(loc=proposal_loc, scale=proposal_scale)
      log_e_x2 = monte_carlo.expectation_importance_sampler_logspace(
          log_f=lambda x: math_ops.log(math_ops.square(x)),
          log_p=p.log_prob,
          sampling_dist_q=q,
          n=num_draws,
          seed=42)
      e_x2 = math_ops.exp(log_e_x2)
      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
      # pass.
      self.assertEqual(p.batch_shape, e_x2.get_shape())
      self.assertAllClose([1., (2 / 3.)**2], e_x2.eval(), rtol=0.02)
class ExpectationTest(test.TestCase):
  """Tests for monte_carlo.expectation."""

  def test_mc_estimate_of_normal_mean_and_variance_is_correct_vs_analytic(self):
    random_seed.set_random_seed(0)
    num_draws = 20000
    with self.test_session():
      p = distributions.Normal(loc=[1.0, -1.0], scale=[0.3, 0.5])
      # Draw once and reuse the samples for both moment estimates.
      samples = p.sample(num_draws, seed=42)
      e_x = monte_carlo.expectation(lambda x: x, p, z=samples, seed=42)
      e_x2 = monte_carlo.expectation(math_ops.square, p, z=samples, seed=0)
      var = e_x2 - math_ops.square(e_x)
      self.assertEqual(p.batch_shape, e_x.get_shape())
      self.assertEqual(p.batch_shape, e_x2.get_shape())
      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
      # pass.
      self.assertAllClose(p.mean().eval(), e_x.eval(), rtol=0.01)
      self.assertAllClose(p.variance().eval(), var.eval(), rtol=0.02)
class GetSamplesTest(test.TestCase):
  """Test the private method 'get_samples'."""

  def test_raises_if_both_z_and_n_are_none(self):
    # Supplying neither samples nor a sample count is an error.
    with self.test_session():
      dist = distributions.Normal(loc=0., scale=1.)
      with self.assertRaisesRegexp(ValueError, 'exactly one'):
        _get_samples(dist, None, None, None)

  def test_raises_if_both_z_and_n_are_not_none(self):
    # Supplying both samples and a sample count is also an error.
    with self.test_session():
      dist = distributions.Normal(loc=0., scale=1.)
      explicit_samples = dist.sample(seed=42)
      with self.assertRaisesRegexp(ValueError, 'exactly one'):
        _get_samples(dist, explicit_samples, 1, None)

  def test_returns_n_samples_if_n_provided(self):
    with self.test_session():
      dist = distributions.Normal(loc=0., scale=1.)
      drawn = _get_samples(dist, None, 10, None)
      self.assertEqual((10,), drawn.get_shape())

  def test_returns_z_if_z_provided(self):
    with self.test_session():
      dist = distributions.Normal(loc=0., scale=1.)
      provided = dist.sample(10, seed=42)
      result = _get_samples(dist, provided, None, None)
      self.assertEqual((10,), result.get_shape())
# Run all test cases defined in this module.
if __name__ == '__main__':
  test.main()
| AsimmHirani/ISpyPi | tensorflow/contrib/tensorflow-master/tensorflow/contrib/bayesflow/python/kernel_tests/monte_carlo_test.py | Python | apache-2.0 | 7,445 | [
"Gaussian"
] | e97320eaab9532adc78ea19919f65ceedfa582aa912375a3a0694cf8fbdf38f4 |
#!/usr/bin/env python2
# Copyright (C) 2016-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
# #
# ESPResSo++ Python script for a Multisystem simulation #
# #
###########################################################################
import espressopp
from espressopp import Int3D, Real3D
from espressopp.tools import decomp
from espressopp.tools import lattice
import random
import sys
print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
print "+ Multisystem simulations are still possible but have to be setup manually. +"
print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
# NOTE: execution stops here unconditionally; the remainder of this script is
# retained for reference only and is never executed.
sys.exit(0)

# some global definitions
# Lennard-Jones interaction parameters and integration settings
skin = 0.3
rc = 2.5
epsilon = 1.0
sigma = 1.0
shift = 0.0
dt = 0.005
# Langevin thermostat settings
gamma = 1.0
temperature = 1.0
# dedicated (seeded) RNG for the replica-exchange moves: Python's random module
ptrng=random
ptrng.seed(335977)

# this example is hard-coded for four systems with one CPU each
if espressopp.MPI.COMM_WORLD.size != 4:
    print "currently this example can only be run with 4 CPUs"
    sys.exit(0)

# Parallel Tempering (replica exchange) integrator
ptthermostats=[]
pt = espressopp.ParallelTempering(NumberOfSystems = 4, RNG = ptrng)
for i in range(0, pt.getNumberOfSystems()):
    # each replica gets its own system, storage, integrator and thermostat
    pt.startDefiningSystem(i)
    pid, type, x, y, z, vx, vy, vz, Lx, Ly, Lz = espressopp.tools.readxyz('parallel_tempering.xyz')
    num_particles = len(pid)
    boxsize = (Lx, Ly, Lz)
    rho = num_particles / (Lx * Ly * Lz)
    system = espressopp.System()
    rng = espressopp.esutil.RNG()
    bc = espressopp.bc.OrthorhombicBC(rng, boxsize)
    system.bc = bc
    system.rng = rng
    system.skin = skin
    nodeGrid = espressopp.tools.decomp.nodeGrid(pt.getNumberOfCPUsPerSystem(),boxsize,rc,skin)
    cellGrid = espressopp.tools.decomp.cellGrid(boxsize,nodeGrid,rc,skin)
    storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid, nocheck=True)
    system.storage = storage
    vl = espressopp.VerletList(system,cutoff=rc)
    potLJ = espressopp.interaction.LennardJones(epsilon, sigma, rc, shift)
    interLJ = espressopp.interaction.VerletListLennardJones(vl)
    integrator = espressopp.integrator.VelocityVerlet(system)
    integrator.dt = dt
    langevin = espressopp.integrator.LangevinThermostat(system)
    langevin.gamma = gamma
    # temperature ladder: with temperature = 1.0 this gives
    # 0.20, 0.25, 0.30, 0.35 for replicas i = 0..3
    langevin.temperature = temperature*i/20 + 0.2
    integrator.addExtension(langevin)
    interLJ.setPotential(type1=0, type2=0, potential=potLJ)
    system.addInteraction(interLJ)
    # place all particles from the xyz file into this replica's storage
    for k in range(num_particles):
        storage.addParticle(pid[k], Real3D(x[k], y[k], z[k]), checkexist=False)
    storage.decompose()
    pt.setIntegrator(integrator, langevin)
    pt.setAnalysisE(interLJ)
    pt.setAnalysisT(espressopp.analysis.Temperature(system))
    pt.setAnalysisNPart(espressopp.analysis.NPart(system))
    pt.endDefiningSystem(i)

# let each system reach its temperature
for p in range(100):
    pt.run(100)
    multiT = pt._multisystem.runAnalysisTemperature()
    print "%s" % multiT

# production phase: alternate MD runs with replica-exchange attempts
for p in range(10):
    pt.run(200)
    pt.exchange()
    multiT = pt._multisystem.runAnalysisTemperature()
    print "%s" % multiT
| govarguz/espressopp | examples/parallel_tempering/parallel_tempering.py | Python | gpl-3.0 | 4,272 | [
"ESPResSo"
] | 6b161dc89f56c3f8399664d9fa2ca2559c3da36405ceb9da900e5773a376fc9a |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six import StringIO
from future.builtins import zip
from operator import attrgetter
from unittest import TestCase, main
import numpy as np
import pandas as pd
import numpy.testing as npt
import pandas.util.testing as pdt
from skbio.util import get_data_path, assert_data_frame_almost_equal
from skbio.stats.gradient import (GradientANOVA, AverageGradientANOVA,
TrajectoryGradientANOVA,
FirstDifferenceGradientANOVA,
WindowDifferenceGradientANOVA, GroupResults,
CategoryResults, GradientANOVAResults,
_weight_by_vector, _ANOVA_trajectories)
class BaseTests(TestCase):
    """Shared fixtures for all gradient test cases in this module."""

    def setUp(self):
        """Initializes some data for testing"""
        # Ordination coordinates: 9 samples x 9 axes (the last axis is
        # effectively zero variance).
        coord_data = {
            'PC.636': np.array([-0.212230626531, 0.216034194368, 0.03532727349,
                                -0.254450494129, -0.0687468542543,
                                0.231895596562, 0.00496549154314,
                                -0.0026246871695, 9.73837390723e-10]),
            'PC.635': np.array([-0.277487312135, -0.0295483215975,
                                -0.0744173437992, 0.0957182357964,
                                0.204714844022, -0.0055407341857,
                                -0.190287966833, 0.16307126638,
                                9.73837390723e-10]),
            'PC.356': np.array([0.220886492631, 0.0874848360559,
                                -0.351990132198, -0.00316535032886,
                                0.114635191853, -0.00019194106125,
                                0.188557853937, 0.030002427212,
                                9.73837390723e-10]),
            'PC.481': np.array([0.0308923744062, -0.0446295973489,
                                0.133996451689, 0.29318228566, -0.167812539312,
                                0.130996149793, 0.113551017379, 0.109987942454,
                                9.73837390723e-10]),
            'PC.354': np.array([0.27616778138, -0.0341866951102,
                                0.0633000238256, 0.100446653327,
                                0.123802521199, 0.1285839664, -0.132852841046,
                                -0.217514322505, 9.73837390723e-10]),
            'PC.593': np.array([0.202458130052, -0.115216120518,
                                0.301820871723, -0.18300251046, 0.136208248567,
                                -0.0989435556722, 0.0927738484879,
                                0.0909429797672, 9.73837390723e-10]),
            'PC.355': np.array([0.236467470907, 0.21863434374,
                                -0.0301637746424, -0.0225473129718,
                                -0.205287183891, -0.180224615141,
                                -0.165277751908, 0.0411933458557,
                                9.73837390723e-10]),
            'PC.607': np.array([-0.105517545144, -0.41405687433,
                                -0.150073017617, -0.116066751485,
                                -0.158763393475, -0.0223918378516,
                                -0.0263068046112, -0.0501209518091,
                                9.73837390723e-10]),
            'PC.634': np.array([-0.371636765565, 0.115484234741,
                                0.0721996475289, 0.0898852445906,
                                0.0212491652909, -0.184183028843,
                                0.114877153051, -0.164938000185,
                                9.73837390723e-10])
        }
        self.coords = pd.DataFrame.from_dict(coord_data, orient='index')
        # Same samples restricted to the first three axes; this is what
        # GradientANOVA keeps by default (see test_init).
        coord_data = {
            'PC.636': np.array([-0.212230626531, 0.216034194368,
                                0.03532727349]),
            'PC.635': np.array([-0.277487312135, -0.0295483215975,
                                -0.0744173437992]),
            'PC.356': np.array([0.220886492631, 0.0874848360559,
                                -0.351990132198]),
            'PC.481': np.array([0.0308923744062, -0.0446295973489,
                                0.133996451689]),
            'PC.354': np.array([0.27616778138, -0.0341866951102,
                                0.0633000238256]),
            'PC.593': np.array([0.202458130052, -0.115216120518,
                                0.301820871723]),
            'PC.355': np.array([0.236467470907, 0.21863434374,
                                -0.0301637746424]),
            'PC.607': np.array([-0.105517545144, -0.41405687433,
                                -0.150073017617]),
            'PC.634': np.array([-0.371636765565, 0.115484234741,
                                0.0721996475289])
        }
        self.coords_3axes = pd.DataFrame.from_dict(coord_data, orient='index')
        # Per-sample metadata: categorical (Treatment) and numeric-as-string
        # (DOB, Weight) columns used as trajectory/sort categories.
        metadata_map = {'PC.354': {'Treatment': 'Control',
                                   'DOB': '20061218',
                                   'Weight': '60',
                                   'Description': 'Control_mouse_I.D._354'},
                        'PC.355': {'Treatment': 'Control',
                                   'DOB': '20061218',
                                   'Weight': '55',
                                   'Description': 'Control_mouse_I.D._355'},
                        'PC.356': {'Treatment': 'Control',
                                   'DOB': '20061126',
                                   'Weight': '50',
                                   'Description': 'Control_mouse_I.D._356'},
                        'PC.481': {'Treatment': 'Control',
                                   'DOB': '20070314',
                                   'Weight': '52',
                                   'Description': 'Control_mouse_I.D._481'},
                        'PC.593': {'Treatment': 'Control',
                                   'DOB': '20071210',
                                   'Weight': '57',
                                   'Description': 'Control_mouse_I.D._593'},
                        'PC.607': {'Treatment': 'Fast',
                                   'DOB': '20071112',
                                   'Weight': '65',
                                   'Description': 'Fasting_mouse_I.D._607'},
                        'PC.634': {'Treatment': 'Fast',
                                   'DOB': '20080116',
                                   'Weight': '68',
                                   'Description': 'Fasting_mouse_I.D._634'},
                        'PC.635': {'Treatment': 'Fast',
                                   'DOB': '20080116',
                                   'Weight': '70',
                                   'Description': 'Fasting_mouse_I.D._635'},
                        'PC.636': {'Treatment': 'Fast',
                                   'DOB': '20080116',
                                   'Weight': '72',
                                   'Description': 'Fasting_mouse_I.D._636'}}
        self.metadata_map = pd.DataFrame.from_dict(metadata_map,
                                                   orient='index')
        # Proportion of variance explained per axis (one value per axis).
        self.prop_expl = np.array([25.6216900347, 15.7715955926,
                                   14.1215046787, 11.6913885817, 9.83044890697,
                                   8.51253468595, 7.88775505332, 6.56308246609,
                                   4.42499350906e-16])
        # Result-object fixtures used by the *_to_file tests below.
        gr_wo_msg = GroupResults('Foo', np.array([-2.6750, -0.2510,
                                                  -2.8322, 0.]),
                                 -1.4398, {'mean': -1.4398, 'std': 1.3184},
                                 None)
        gr_w_msg = GroupResults('Bar', np.array([9.6823, 2.9511, 5.2434]),
                                5.9589, {'mean': 5.9589, 'std': 2.7942},
                                "Cannot calculate the first difference "
                                "with a window of size (3).")
        self.groups = [gr_wo_msg, gr_w_msg]
        cr_no_data = CategoryResults('foo', None, None,
                                     'This group can not be used. All groups '
                                     'should have more than 1 element.')
        cr_data = CategoryResults('bar', 0.0110, self.groups, None)
        self.categories = [cr_no_data, cr_data]
        vr = GradientANOVAResults('wdiff', True, self.categories)
        description = CategoryResults('Description', None, None,
                                      'This group can not be used. All groups '
                                      'should have more than 1 element.')
        weight = CategoryResults('Weight', None, None,
                                 'This group can not be used. All groups '
                                 'should have more than 1 element.')
        dob = CategoryResults('DOB', None, None,
                              'This group can not be used. All groups '
                              'should have more than 1 element.')
        control_group = GroupResults('Control', np.array([2.3694, 3.3716,
                                                          5.4452, 4.5704,
                                                          4.4972]),
                                     4.0508, {'avg': 4.0508}, None)
        fast_group = GroupResults('Fast', np.array([7.2220, 4.2726, 1.1169,
                                                    4.0271]),
                                  4.1596, {'avg': 4.1596}, None)
        treatment = CategoryResults('Treatment', 0.9331,
                                    [control_group, fast_group], None)
        vr_real = GradientANOVAResults('avg', False, [description, weight, dob,
                                                      treatment])
        self.vec_results = [vr, vr_real]

    # This function makes the comparisons between the results classes easier
    def assert_group_results_almost_equal(self, obs, exp):
        """Tests that obs and exp are almost equal"""
        self.assertEqual(obs.name, exp.name)
        npt.assert_almost_equal(obs.trajectory, exp.trajectory)
        npt.assert_almost_equal(obs.mean, exp.mean)
        # NOTE(review): under Python 2 keys() returns lists, so this
        # comparison is order-sensitive; presumably both dicts are built the
        # same way so the order matches -- confirm if this ever flakes.
        self.assertEqual(obs.info.keys(), exp.info.keys())
        for key in obs.info:
            npt.assert_almost_equal(obs.info[key], exp.info[key])
        self.assertEqual(obs.message, exp.message)

    def assert_category_results_almost_equal(self, obs, exp):
        """Tests that obs and exp are almost equal"""
        self.assertEqual(obs.category, exp.category)
        if exp.probability is None:
            # A category without a p-value must also carry no groups.
            self.assertTrue(obs.probability is None)
            self.assertTrue(obs.groups is None)
        else:
            npt.assert_almost_equal(obs.probability, exp.probability)
            # Sort by group name so ordering differences do not fail the test.
            for o, e in zip(sorted(obs.groups, key=attrgetter('name')),
                            sorted(exp.groups, key=attrgetter('name'))):
                self.assert_group_results_almost_equal(o, e)

    def assert_gradientANOVA_results_almost_equal(self, obs, exp):
        """Tests that obs and exp are almost equal"""
        self.assertEqual(obs.algorithm, exp.algorithm)
        self.assertEqual(obs.weighted, exp.weighted)
        # Sort by category name so ordering differences do not fail the test.
        for o, e in zip(sorted(obs.categories, key=attrgetter('category')),
                        sorted(exp.categories, key=attrgetter('category'))):
            self.assert_category_results_almost_equal(o, e)
class GradientTests(BaseTests):
    """Tests for the module-level helpers _weight_by_vector and
    _ANOVA_trajectories."""

    def test_weight_by_vector(self):
        """Correctly weights the vectors"""
        # Case 1: unevenly spaced gradient values -> rows are re-weighted.
        trajectory = pd.DataFrame.from_dict({'s1': np.array([1]),
                                             's2': np.array([2]),
                                             's3': np.array([3]),
                                             's4': np.array([4]),
                                             's5': np.array([5]),
                                             's6': np.array([6]),
                                             's7': np.array([7]),
                                             's8': np.array([8])},
                                            orient='index')
        # NOTE(review): DataFrame.sort(columns=...) and .sort(axis=0) below
        # are legacy pandas API (replaced by sort_values/sort_index in
        # modern pandas) -- this module presumably pins an old pandas.
        trajectory.sort(columns=0, inplace=True)
        w_vector = pd.Series(np.array([1, 5, 8, 12, 45, 80, 85, 90]),
                             ['s1', 's2', 's3', 's4',
                              's5', 's6', 's7', 's8']).astype(np.float64)
        exp = pd.DataFrame.from_dict({'s1': np.array([1]),
                                      's2': np.array([6.3571428571]),
                                      's3': np.array([12.7142857142]),
                                      's4': np.array([12.7142857142]),
                                      's5': np.array([1.9264069264]),
                                      's6': np.array([2.1795918367]),
                                      's7': np.array([17.8]),
                                      's8': np.array([20.3428571428])},
                                     orient='index')
        obs = _weight_by_vector(trajectory, w_vector)
        assert_data_frame_almost_equal(obs.sort(axis=0), exp.sort(axis=0))
        # Case 2: evenly spaced gradient -> weighting is the identity.
        trajectory = pd.DataFrame.from_dict({'s1': np.array([1]),
                                             's2': np.array([2]),
                                             's3': np.array([3]),
                                             's4': np.array([4]),
                                             's5': np.array([5]),
                                             's6': np.array([6]),
                                             's7': np.array([7]),
                                             's8': np.array([8])},
                                            orient='index')
        trajectory.sort(columns=0, inplace=True)
        w_vector = pd.Series(np.array([1, 2, 3, 4, 5, 6, 7, 8]),
                             ['s1', 's2', 's3', 's4',
                              's5', 's6', 's7', 's8']).astype(np.float64)
        exp = pd.DataFrame.from_dict({'s1': np.array([1]), 's2': np.array([2]),
                                      's3': np.array([3]), 's4': np.array([4]),
                                      's5': np.array([5]), 's6': np.array([6]),
                                      's7': np.array([7]), 's8': np.array([8])
                                      },
                                     orient='index')
        obs = _weight_by_vector(trajectory, w_vector)
        assert_data_frame_almost_equal(obs.sort(axis=0), exp.sort(axis=0))
        # Case 3: evenly spaced gradient not starting at the origin -> still
        # the identity.
        trajectory = pd.DataFrame.from_dict({'s2': np.array([2]),
                                             's3': np.array([3]),
                                             's4': np.array([4]),
                                             's5': np.array([5]),
                                             's6': np.array([6])},
                                            orient='index')
        trajectory.sort(columns=0, inplace=True)
        w_vector = pd.Series(np.array([25, 30, 35, 40, 45]),
                             ['s2', 's3', 's4', 's5', 's6']).astype(np.float64)
        exp = pd.DataFrame.from_dict({'s2': np.array([2]), 's3': np.array([3]),
                                      's4': np.array([4]), 's5': np.array([5]),
                                      's6': np.array([6])}, orient='index')
        obs = _weight_by_vector(trajectory, w_vector)
        assert_data_frame_almost_equal(obs.sort(axis=0), exp.sort(axis=0))
        # Case 4: multi-column trajectories, evenly spaced gradient.
        trajectory = pd.DataFrame.from_dict({'s1': np.array([1, 2, 3]),
                                             's2': np.array([2, 3, 4]),
                                             's3': np.array([5, 6, 7]),
                                             's4': np.array([8, 9, 10])},
                                            orient='index')
        trajectory.sort(columns=0, inplace=True)
        w_vector = pd.Series(np.array([1, 2, 3, 4]),
                             ['s1', 's2', 's3', 's4']).astype(np.float64)
        exp = pd.DataFrame.from_dict({'s1': np.array([1, 2, 3]),
                                      's2': np.array([2, 3, 4]),
                                      's3': np.array([5, 6, 7]),
                                      's4': np.array([8, 9, 10])},
                                     orient='index').astype(np.float64)
        obs = _weight_by_vector(trajectory, w_vector)
        assert_data_frame_almost_equal(obs.sort(axis=0), exp.sort(axis=0))
        # Case 5: realistic PCoA data weighted by the Weight category.
        sample_ids = ['PC.356', 'PC.481', 'PC.355', 'PC.593', 'PC.354']
        trajectory = pd.DataFrame.from_dict({'PC.356': np.array([5.65948525,
                                                                 1.37977545,
                                                                 -4.9706303]),
                                             'PC.481': np.array([0.79151484,
                                                                 -0.70387996,
                                                                 1.89223152]),
                                             'PC.355': np.array([6.05869624,
                                                                 3.44821245,
                                                                 -0.42595788]),
                                             'PC.593': np.array([5.18731945,
                                                                 -1.81714206,
                                                                 4.26216485]),
                                             'PC.354': np.array([7.07588529,
                                                                 -0.53917873,
                                                                 0.89389158])
                                             }, orient='index')
        w_vector = pd.Series(np.array([50, 52, 55, 57, 60]),
                             sample_ids).astype(np.float64)
        exp = pd.DataFrame.from_dict({'PC.356': np.array([5.65948525,
                                                          1.37977545,
                                                          -4.9706303]),
                                      'PC.481': np.array([0.98939355,
                                                          -0.87984995,
                                                          2.3652894]),
                                      'PC.355': np.array([5.04891353,
                                                          2.87351038,
                                                          -0.3549649]),
                                      'PC.593': np.array([6.48414931,
                                                          -2.27142757,
                                                          5.32770606]),
                                      'PC.354': np.array([5.89657108,
                                                          -0.44931561,
                                                          0.74490965])
                                      }, orient='index')
        # NOTE(review): .ix is legacy pandas (replaced by .loc) -- see above.
        obs = _weight_by_vector(trajectory.ix[sample_ids],
                                w_vector[sample_ids])
        assert_data_frame_almost_equal(obs.sort(axis=0), exp.sort(axis=0))

    def test_weight_by_vector_single_element(self):
        # A single-sample trajectory cannot be re-weighted; it is returned
        # unchanged.
        trajectory = pd.DataFrame.from_dict({'s1': np.array([42])},
                                            orient='index')
        w_vector = pd.Series(np.array([5]), ['s1']).astype(np.float64)
        obs = _weight_by_vector(trajectory, w_vector)
        assert_data_frame_almost_equal(obs, trajectory)

    def test_weight_by_vector_error(self):
        """Raises an error with erroneous inputs"""
        # Different vector lengths
        with self.assertRaises(ValueError):
            _weight_by_vector([1, 2, 3, 4], [1, 2, 3])
        # Inputs are not iterables
        with self.assertRaises(TypeError):
            _weight_by_vector(9, 1)
        # Weighting vector is not a gradient
        with self.assertRaises(ValueError):
            _weight_by_vector([1, 2, 3, 4], [1, 2, 3, 3])

    def test_ANOVA_trajectories(self):
        """Correctly performs the check before running ANOVA"""
        # Only one group in a given category
        group = GroupResults('Bar', np.array([2.3694943596755276,
                                              3.3716388181385781,
                                              5.4452089176253367,
                                              4.5704258453173559,
                                              4.4972603724478377]),
                             4.05080566264, {'avg': 4.0508056626409275}, None)
        obs = _ANOVA_trajectories('Foo', [group])
        exp = CategoryResults('Foo', None, None,
                              'Only one value in the group.')
        self.assert_category_results_almost_equal(obs, exp)
        # One element have only one element
        group2 = GroupResults('FooBar', np.array([4.05080566264]),
                              4.05080566264, {'avg': 4.05080566264}, None)
        obs = _ANOVA_trajectories('Foo', [group, group2])
        exp = CategoryResults('Foo', None, None,
                              'This group can not be used. All groups '
                              'should have more than 1 element.')
        self.assert_category_results_almost_equal(obs, exp)
        # Three valid groups -> a real one-way ANOVA p-value is produced.
        gr1 = GroupResults('Foo', np.array([-0.219044992, 0.079674486,
                                            0.09233683]),
                           -0.015677892, {'avg': -0.015677892}, None)
        gr2 = GroupResults('Bar', np.array([-0.042258081, 0.000204041,
                                            0.024837603]),
                           -0.0732878716, {'avg': -0.0732878716}, None)
        gr3 = GroupResults('FBF', np.array([0.080504323, -0.212014503,
                                            -0.088353435]),
                           -0.0057388123, {'avg': -0.0057388123}, None)
        obs = _ANOVA_trajectories('Cat', [gr1, gr2, gr3])
        exp = CategoryResults('Cat', 0.8067456876, [gr1, gr2, gr3], None)
        self.assert_category_results_almost_equal(obs, exp)
class GroupResultsTests(BaseTests):
    """Tests for GroupResults serialization."""

    def test_to_file(self):
        """to_files writes the expected formatted and raw output."""
        # Pair each fixture group with its (formatted, raw) expected files.
        fixtures = [('gr_wo_msg_out', 'gr_wo_msg_raw'),
                    ('gr_w_msg_out', 'gr_w_msg_raw')]
        for gr, (out_fp, raw_fp) in zip(self.groups, fixtures):
            out_f, raw_f = StringIO(), StringIO()
            try:
                gr.to_files(out_f, raw_f)
                obs_out = out_f.getvalue()
                obs_raw = raw_f.getvalue()
            finally:
                out_f.close()
                raw_f.close()
            with open(get_data_path(out_fp)) as f:
                self.assertEqual(obs_out, f.read())
            with open(get_data_path(raw_fp)) as f:
                self.assertEqual(obs_raw, f.read())
class CategoryResultsTests(BaseTests):
    """Tests for CategoryResults serialization."""

    def test_to_file(self):
        """to_files writes the expected formatted and raw output."""
        # Pair each fixture category with its (formatted, raw) expected files.
        fixtures = [('cr_no_data_out', 'cr_no_data_raw'),
                    ('cr_data_out', 'cr_data_raw')]
        for cat, (out_fp, raw_fp) in zip(self.categories, fixtures):
            out_f, raw_f = StringIO(), StringIO()
            try:
                cat.to_files(out_f, raw_f)
                obs_out = out_f.getvalue()
                obs_raw = raw_f.getvalue()
            finally:
                out_f.close()
                raw_f.close()
            with open(get_data_path(out_fp)) as f:
                self.assertEqual(obs_out, f.read())
            with open(get_data_path(raw_fp)) as f:
                self.assertEqual(obs_raw, f.read())
class GradientANOVAResultsTests(BaseTests):
    """Tests for GradientANOVAResults serialization."""

    def test_to_file(self):
        """to_files writes the expected formatted and raw output."""
        # NOTE(review): self.vec_results holds two fixtures (vr and vr_real),
        # but only one pair of expected files is listed, so zip() stops after
        # the first fixture and vr_real is never checked -- confirm whether
        # this is intentional or the vr_real expected files are missing.
        out_paths = ['vr_out']
        raw_paths = ['vr_raw']
        for vr, out_fp, raw_fp in zip(self.vec_results, out_paths, raw_paths):
            obs_out_f = StringIO()
            obs_raw_f = StringIO()
            vr.to_files(obs_out_f, obs_raw_f)
            obs_out = obs_out_f.getvalue()
            obs_raw = obs_raw_f.getvalue()
            obs_out_f.close()
            obs_raw_f.close()
            with open(get_data_path(out_fp)) as f:
                exp_out = f.read()
            with open(get_data_path(raw_fp)) as f:
                exp_raw = f.read()
            self.assertEqual(obs_out, exp_out)
            self.assertEqual(obs_raw, exp_raw)
class GradientANOVATests(BaseTests):
    """Tests for the GradientANOVA base class (construction, validation,
    sample normalization and group building)."""

    def test_init(self):
        """Correctly initializes the class attributes"""
        # Note self._groups is tested on test_make_groups
        # so we are not testing it here
        # Test with weighted = False
        bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
        # By default only the first 3 axes are kept.
        assert_data_frame_almost_equal(bv._coords, self.coords_3axes)
        exp_prop_expl = np.array([25.6216900347, 15.7715955926,
                                  14.1215046787])
        npt.assert_equal(bv._prop_expl, exp_prop_expl)
        assert_data_frame_almost_equal(bv._metadata_map, self.metadata_map)
        self.assertTrue(bv._weighting_vector is None)
        self.assertFalse(bv._weighted)
        # Test with weighted = True
        bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                           sort_category='Weight', weighted=True)
        assert_data_frame_almost_equal(bv._coords, self.coords_3axes)
        npt.assert_equal(bv._prop_expl, exp_prop_expl)
        assert_data_frame_almost_equal(bv._metadata_map, self.metadata_map)
        # The weighting vector is the numeric Weight column, as floats.
        exp_weighting_vector = pd.Series(
            np.array([60, 55, 50, 52, 57, 65, 68, 70, 72]),
            ['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593', 'PC.607',
             'PC.634', 'PC.635', 'PC.636'], name='Weight'
        ).astype(np.float64)
        pdt.assert_series_equal(bv._weighting_vector, exp_weighting_vector)
        self.assertTrue(bv._weighted)

    def test_init_error(self):
        """Raises an error with erroneous inputs"""
        # Raises ValueError if any category in trajectory_categories is not
        # present in metadata_map
        with self.assertRaises(ValueError):
            GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                          trajectory_categories=['foo'])
        with self.assertRaises(ValueError):
            GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                          trajectory_categories=['Weight', 'Treatment', 'foo'])
        # Raises ValueError if sort_category is not present in metadata_map
        with self.assertRaises(ValueError):
            GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                          sort_category='foo')
        # Raises ValueError if weighted == True and sort_category == None
        with self.assertRaises(ValueError):
            GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                          weighted=True)
        # Raises ValueError if weighted == True and the values under
        # sort_category are not numerical
        with self.assertRaises(ValueError):
            GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                          sort_category='Treatment', weighted=True)
        # Raises ValueError if axes > len(prop_expl)
        with self.assertRaises(ValueError):
            GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                          axes=10)
        # Raises ValueError if axes < 0
        with self.assertRaises(ValueError):
            GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                          axes=-1)

    def test_normalize_samples(self):
        """Correctly normalizes the samples between coords and metadata_map"""
        coord_data = {
            'PC.636': np.array([-0.212230626531, 0.216034194368,
                                0.03532727349]),
            'PC.635': np.array([-0.277487312135, -0.0295483215975,
                                -0.0744173437992]),
            'PC.355': np.array([0.236467470907, 0.21863434374,
                                -0.0301637746424]),
            'PC.607': np.array([-0.105517545144, -0.41405687433,
                                -0.150073017617]),
            'PC.634': np.array([-0.371636765565, 0.115484234741,
                                0.0721996475289])
        }
        subset_coords = pd.DataFrame.from_dict(coord_data, orient='index')
        metadata_map = {'PC.355': {'Treatment': 'Control',
                                   'DOB': '20061218',
                                   'Weight': '55',
                                   'Description': 'Control_mouse_I.D._355'},
                        'PC.607': {'Treatment': 'Fast',
                                   'DOB': '20071112',
                                   'Weight': '65',
                                   'Description': 'Fasting_mouse_I.D._607'},
                        'PC.634': {'Treatment': 'Fast',
                                   'DOB': '20080116',
                                   'Weight': '68',
                                   'Description': 'Fasting_mouse_I.D._634'},
                        'PC.635': {'Treatment': 'Fast',
                                   'DOB': '20080116',
                                   'Weight': '70',
                                   'Description': 'Fasting_mouse_I.D._635'},
                        'PC.636': {'Treatment': 'Fast',
                                   'DOB': '20080116',
                                   'Weight': '72',
                                   'Description': 'Fasting_mouse_I.D._636'}}
        subset_metadata_map = pd.DataFrame.from_dict(metadata_map,
                                                     orient='index')
        # Takes a subset from metadata_map
        # NOTE(review): DataFrame.sort(axis=0) is legacy pandas API
        # (replaced by sort_index) -- kept throughout this module.
        bv = GradientANOVA(subset_coords, self.prop_expl, self.metadata_map)
        assert_data_frame_almost_equal(
            bv._coords.sort(axis=0),
            subset_coords.sort(axis=0))
        assert_data_frame_almost_equal(
            bv._metadata_map.sort(axis=0),
            subset_metadata_map.sort(axis=0))
        # Takes a subset from coords
        bv = GradientANOVA(self.coords, self.prop_expl, subset_metadata_map)
        assert_data_frame_almost_equal(
            bv._coords.sort(axis=0),
            subset_coords.sort(axis=0))
        assert_data_frame_almost_equal(
            bv._metadata_map.sort(axis=0),
            subset_metadata_map.sort(axis=0))
        # Takes a subset from metadata_map and coords at the same time
        coord_data = {
            'PC.636': np.array([-0.212230626531, 0.216034194368,
                                0.03532727349]),
            'PC.635': np.array([-0.277487312135, -0.0295483215975,
                                -0.0744173437992]),
            'PC.355': np.array([0.236467470907, 0.21863434374,
                                -0.0301637746424])
        }
        subset_coords = pd.DataFrame.from_dict(coord_data, orient='index')
        metadata_map = {'PC.355': {'Treatment': 'Control',
                                   'DOB': '20061218',
                                   'Weight': '55',
                                   'Description': 'Control_mouse_I.D._355'},
                        'PC.607': {'Treatment': 'Fast',
                                   'DOB': '20071112',
                                   'Weight': '65',
                                   'Description': 'Fasting_mouse_I.D._607'},
                        'PC.634': {'Treatment': 'Fast',
                                   'DOB': '20080116',
                                   'Weight': '68',
                                   'Description': 'Fasting_mouse_I.D._634'}}
        subset_metadata_map = pd.DataFrame.from_dict(metadata_map,
                                                     orient='index')
        bv = GradientANOVA(subset_coords, self.prop_expl, subset_metadata_map)
        # Only PC.355 is present in both inputs.
        exp_coords = pd.DataFrame.from_dict(
            {'PC.355': np.array([0.236467470907, 0.21863434374,
                                 -0.0301637746424])},
            orient='index')
        assert_data_frame_almost_equal(
            bv._coords.sort(axis=0),
            exp_coords.sort(axis=0))
        exp_metadata_map = pd.DataFrame.from_dict(
            {'PC.355': {'Treatment': 'Control',
                        'DOB': '20061218',
                        'Weight': '55',
                        'Description': 'Control_mouse_I.D._355'}},
            orient='index')
        assert_data_frame_almost_equal(
            bv._metadata_map.sort(axis=0),
            exp_metadata_map.sort(axis=0))

    def test_normalize_samples_error(self):
        """Raises an error if coords and metadata_map does not have samples in
        common"""
        error_metadata_map = pd.DataFrame.from_dict(
            {'Foo': {'Treatment': 'Control',
                     'DOB': '20061218',
                     'Weight': '55',
                     'Description': 'Control_mouse_I.D._355'},
             'Bar': {'Treatment': 'Fast',
                     'DOB': '20071112',
                     'Weight': '65',
                     'Description': 'Fasting_mouse_I.D._607'}},
            orient='index')
        with self.assertRaises(ValueError):
            GradientANOVA(self.coords, self.prop_expl, error_metadata_map)

    def test_make_groups(self):
        """Correctly generates the groups for trajectory_categories"""
        # Test with all categories
        bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
        exp_groups = {'Treatment': {'Control': ['PC.354', 'PC.355', 'PC.356',
                                                'PC.481', 'PC.593'],
                                    'Fast': ['PC.607', 'PC.634',
                                             'PC.635', 'PC.636']},
                      'DOB': {'20061218': ['PC.354', 'PC.355'],
                              '20061126': ['PC.356'],
                              '20070314': ['PC.481'],
                              '20071210': ['PC.593'],
                              '20071112': ['PC.607'],
                              '20080116': ['PC.634', 'PC.635', 'PC.636']},
                      'Weight': {'60': ['PC.354'],
                                 '55': ['PC.355'],
                                 '50': ['PC.356'],
                                 '52': ['PC.481'],
                                 '57': ['PC.593'],
                                 '65': ['PC.607'],
                                 '68': ['PC.634'],
                                 '70': ['PC.635'],
                                 '72': ['PC.636']},
                      'Description': {'Control_mouse_I.D._354': ['PC.354'],
                                      'Control_mouse_I.D._355': ['PC.355'],
                                      'Control_mouse_I.D._356': ['PC.356'],
                                      'Control_mouse_I.D._481': ['PC.481'],
                                      'Control_mouse_I.D._593': ['PC.593'],
                                      'Fasting_mouse_I.D._607': ['PC.607'],
                                      'Fasting_mouse_I.D._634': ['PC.634'],
                                      'Fasting_mouse_I.D._635': ['PC.635'],
                                      'Fasting_mouse_I.D._636': ['PC.636']}}
        self.assertEqual(bv._groups, exp_groups)
        # Test with user-defined categories
        bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
                           trajectory_categories=['Treatment', 'DOB'])
        exp_groups = {'Treatment': {'Control': ['PC.354', 'PC.355', 'PC.356',
                                                'PC.481', 'PC.593'],
                                    'Fast': ['PC.607', 'PC.634',
                                             'PC.635', 'PC.636']},
                      'DOB': {'20061218': ['PC.354', 'PC.355'],
                              '20061126': ['PC.356'],
                              '20070314': ['PC.481'],
                              '20071210': ['PC.593'],
                              '20071112': ['PC.607'],
                              '20080116': ['PC.634', 'PC.635', 'PC.636']}}
        self.assertEqual(bv._groups, exp_groups)

    def test_make_groups_natural_sorting(self):
        # Ensure sample IDs are sorted using a natural sorting algorithm.
        df = pd.DataFrame.from_dict({
            'a2': {'Col1': 'foo', 'Col2': '1.0'},
            'a1': {'Col1': 'bar', 'Col2': '-42.0'},
            'a11.0': {'Col1': 'foo', 'Col2': '2e-5'},
            'a-10': {'Col1': 'foo', 'Col2': '5'},
            'a10': {'Col1': 'bar', 'Col2': '5'}},
            orient='index')
        coords = pd.DataFrame.from_dict({
            'a10': np.array([-0.212230626531, 0.216034194368, 0.03532727349]),
            'a11.0': np.array([-0.277487312135, -0.0295483215975,
                               -0.0744173437992]),
            'a1': np.array([0.220886492631, 0.0874848360559,
                            -0.351990132198]),
            'a2': np.array([0.0308923744062, -0.0446295973489,
                            0.133996451689]),
            'a-10': np.array([0.27616778138, -0.0341866951102,
                              0.0633000238256])},
            orient='index')
        prop_expl = np.array([25.6216900347, 15.7715955926, 14.1215046787,
                              11.6913885817, 9.83044890697])
        # Sort by sample IDs.
        ga = GradientANOVA(coords, prop_expl, df)
        exp_groups = {
            'Col1': {
                'foo': ['a-10', 'a2', 'a11.0'],
                'bar': ['a1', 'a10']
            },
            'Col2': {
                '1.0': ['a2'],
                '-42.0': ['a1'],
                '2e-5': ['a11.0'],
                '5': ['a-10', 'a10']
            }
        }
        self.assertEqual(ga._groups, exp_groups)
        # Sort sample IDs by Col2.
        ga = GradientANOVA(coords, prop_expl, df,
                           trajectory_categories=['Col1'],
                           sort_category='Col2')
        exp_groups = {
            'Col1': {
                'foo': ['a11.0', 'a2', 'a-10'],
                'bar': ['a1', 'a10']
            }
        }
        self.assertEqual(ga._groups, exp_groups)

    def test_get_trajectories(self):
        """Should raise a NotImplementedError as this is a base class"""
        bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
        with self.assertRaises(NotImplementedError):
            bv.get_trajectories()

    def test_get_group_trajectories(self):
        """Should raise a NotImplementedError in usual execution as this is
        a base class"""
        # NOTE(review): this duplicates test_get_trajectories -- it calls
        # get_trajectories() rather than _get_group_trajectories();
        # presumably a copy-paste leftover, confirm intent.
        bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
        with self.assertRaises(NotImplementedError):
            bv.get_trajectories()

    def test_get_group_trajectories_error(self):
        """Should raise a RuntimeError if the user call _get_group_trajectories
        with erroneous inputs"""
        bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
        with self.assertRaises(RuntimeError):
            bv._get_group_trajectories("foo", ['foo'])
        with self.assertRaises(RuntimeError):
            bv._get_group_trajectories("bar", [])

    def test_compute_trajectories_results(self):
        """Should raise a NotImplementedError as this is a base class"""
        bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
        with self.assertRaises(NotImplementedError):
            bv._compute_trajectories_results("foo", [])
class AverageGradientANOVATests(BaseTests):
    """Tests for the 'avg' trajectory algorithm."""

    def test_get_trajectories_all(self):
        """get_trajectories returns the results of all categories"""
        av = AverageGradientANOVA(self.coords, self.prop_expl,
                                  self.metadata_map)
        obs = av.get_trajectories()
        # Categories whose groups are all singletons are unusable.
        exp_description = CategoryResults('Description', None, None,
                                          'This group can not be used. All '
                                          'groups should have more than 1 '
                                          'element.')
        exp_weight = CategoryResults('Weight', None, None,
                                     'This group can not be used. All groups '
                                     'should have more than 1 element.')
        exp_dob = CategoryResults('DOB', None, None,
                                  'This group can not be used. All groups '
                                  'should have more than 1 element.')
        exp_control_group = GroupResults('Control',
                                         np.array([2.3694943596755276,
                                                   3.3716388181385781,
                                                   5.4452089176253367,
                                                   4.5704258453173559,
                                                   4.4972603724478377]),
                                         4.05080566264,
                                         {'avg': 4.0508056626409275}, None)
        exp_fast_group = GroupResults('Fast', np.array([7.2220488239279126,
                                                        4.2726021564374372,
                                                        1.1169097274372082,
                                                        4.02717600030876]),
                                      4.15968417703,
                                      {'avg': 4.1596841770278292}, None)
        exp_treatment = CategoryResults('Treatment', 0.93311555,
                                        [exp_control_group, exp_fast_group],
                                        None)
        exp = GradientANOVAResults('avg', False, [exp_description, exp_weight,
                                                  exp_dob, exp_treatment])
        self.assert_gradientANOVA_results_almost_equal(obs, exp)

    def test_get_trajectories_single(self):
        """get_trajectories returns the results of the provided category"""
        av = AverageGradientANOVA(self.coords, self.prop_expl,
                                  self.metadata_map,
                                  trajectory_categories=['Treatment'])
        obs = av.get_trajectories()
        exp_control_group = GroupResults('Control',
                                         np.array([2.3694943596755276,
                                                   3.3716388181385781,
                                                   5.4452089176253367,
                                                   4.5704258453173559,
                                                   4.4972603724478377]),
                                         4.05080566264,
                                         {'avg': 4.0508056626409275}, None)
        exp_fast_group = GroupResults('Fast', np.array([7.2220488239279126,
                                                        4.2726021564374372,
                                                        1.1169097274372082,
                                                        4.02717600030876]),
                                      4.15968417703,
                                      {'avg': 4.1596841770278292}, None)
        exp_treatment = CategoryResults('Treatment', 0.93311555,
                                        [exp_control_group, exp_fast_group],
                                        None)
        exp = GradientANOVAResults('avg', False, [exp_treatment])
        self.assert_gradientANOVA_results_almost_equal(obs, exp)

    def test_get_trajectories_weighted(self):
        """get_trajectories returns the correct weighted results"""
        av = AverageGradientANOVA(self.coords, self.prop_expl,
                                  self.metadata_map,
                                  trajectory_categories=['Treatment'],
                                  sort_category='Weight', weighted=True)
        obs = av.get_trajectories()
        exp_control_group = GroupResults('Control', np.array([5.7926887872,
                                                              4.3242308936,
                                                              2.9212403501,
                                                              5.5400792151,
                                                              1.2326804315]),
                                         3.9621839355,
                                         {'avg': 3.9621839355}, None)
        exp_fast_group = GroupResults('Fast', np.array([7.2187223286,
                                                        2.5522161282,
                                                        2.2349795861,
                                                        4.5278215248]),
                                      4.1334348919,
                                      {'avg': 4.1334348919}, None)
        exp_treatment = CategoryResults('Treatment', 0.9057666800,
                                        [exp_control_group, exp_fast_group],
                                        None)
        exp = GradientANOVAResults('avg', True, [exp_treatment])
        self.assert_gradientANOVA_results_almost_equal(obs, exp)
class TrajectoryGradientANOVATests(BaseTests):
    """Tests for the 'trajectory' algorithm (consecutive-sample distances)."""

    def test_get_trajectories(self):
        """Unweighted trajectories sorted by the Weight category."""
        tv = TrajectoryGradientANOVA(self.coords, self.prop_expl,
                                     self.metadata_map,
                                     trajectory_categories=['Treatment'],
                                     sort_category='Weight')
        obs = tv.get_trajectories()
        exp_control_group = GroupResults('Control', np.array([8.6681963576,
                                                              7.0962717982,
                                                              7.1036434615,
                                                              4.0675712674]),
                                         6.73392072123,
                                         {'2-norm': 13.874494152}, None)
        exp_fast_group = GroupResults('Fast', np.array([11.2291654905,
                                                        3.9163741156,
                                                        4.4943507388]),
                                      6.5466301150,
                                      {'2-norm': 12.713431181}, None)
        exp_treatment = CategoryResults('Treatment', 0.9374500147,
                                        [exp_control_group, exp_fast_group],
                                        None)
        exp = GradientANOVAResults('trajectory', False, [exp_treatment])
        self.assert_gradientANOVA_results_almost_equal(obs, exp)

    def test_get_trajectories_weighted(self):
        """Weighted trajectories sorted by the Weight category."""
        tv = TrajectoryGradientANOVA(self.coords, self.prop_expl,
                                     self.metadata_map,
                                     trajectory_categories=['Treatment'],
                                     sort_category='Weight', weighted=True)
        obs = tv.get_trajectories()
        exp_control_group = GroupResults('Control', np.array([8.9850643421,
                                                              6.1617529749,
                                                              7.7989125908,
                                                              4.9666249268]),
                                         6.9780887086,
                                         {'2-norm': 14.2894710091}, None)
        exp_fast_group = GroupResults('Fast', np.array([9.6823682852,
                                                        2.9511115209,
                                                        5.2434091953]),
                                      5.9589630005,
                                      {'2-norm': 11.3995901159}, None)
        exp_treatment = CategoryResults('Treatment', 0.6248157720,
                                        [exp_control_group, exp_fast_group],
                                        None)
        exp = GradientANOVAResults('trajectory', True, [exp_treatment])
        self.assert_gradientANOVA_results_almost_equal(obs, exp)
class FirstDifferenceGradientANOVATests(BaseTests):
    """Tests for the 'diff' algorithm (first differences of trajectories)."""

    def test_get_trajectories(self):
        """Unweighted first differences sorted by the Weight category."""
        dv = FirstDifferenceGradientANOVA(self.coords, self.prop_expl,
                                          self.metadata_map,
                                          trajectory_categories=['Treatment'],
                                          sort_category='Weight')
        obs = dv.get_trajectories()
        exp_control_group = GroupResults('Control', np.array([-1.5719245594,
                                                              0.0073716633,
                                                              -3.0360721941]),
                                         -1.5335416967,
                                         {'mean': -1.5335416967,
                                          'std': 1.2427771485}, None)
        exp_fast_group = GroupResults('Fast', np.array([-7.3127913749,
                                                        0.5779766231]),
                                      -3.3674073758,
                                      {'mean': -3.3674073758,
                                       'std': 3.9453839990}, None)
        exp_treatment = CategoryResults('Treatment', 0.6015260608,
                                        [exp_control_group, exp_fast_group],
                                        None)
        exp = GradientANOVAResults('diff', False, [exp_treatment])
        self.assert_gradientANOVA_results_almost_equal(obs, exp)

    def test_get_trajectories_weighted(self):
        """Weighted first differences sorted by the Weight category."""
        dv = FirstDifferenceGradientANOVA(self.coords, self.prop_expl,
                                          self.metadata_map,
                                          trajectory_categories=['Treatment'],
                                          sort_category='Weight',
                                          weighted=True)
        obs = dv.get_trajectories()
        exp_control_group = GroupResults('Control', np.array([-2.8233113671,
                                                              1.6371596158,
                                                              -2.8322876639]),
                                         -1.3394798050,
                                         {'mean': -1.3394798050,
                                          'std': 2.1048051097}, None)
        exp_fast_group = GroupResults('Fast', np.array([-6.7312567642,
                                                        2.2922976743]),
                                      -2.2194795449,
                                      {'mean': -2.2194795449,
                                       'std': 4.5117772193}, None)
        exp_treatment = CategoryResults('Treatment', 0.8348644420,
                                        [exp_control_group, exp_fast_group],
                                        None)
        exp = GradientANOVAResults('diff', True, [exp_treatment])
        self.assert_gradientANOVA_results_almost_equal(obs, exp)
class WindowDifferenceGradientANOVATests(BaseTests):
    """Tests for the 'wdiff' algorithm (windowed differences, window=3)."""

    def test_get_trajectories(self):
        """Unweighted windowed differences sorted by the Weight category."""
        wdv = WindowDifferenceGradientANOVA(
            self.coords, self.prop_expl, self.metadata_map, 3,
            trajectory_categories=['Treatment'], sort_category='Weight')
        obs = wdv.get_trajectories()
        exp_control_group = GroupResults('Control', np.array([-2.5790341819,
                                                              -2.0166764661,
                                                              -3.0360721941,
                                                              0.]),
                                         -1.9079457105,
                                         {'mean': -1.9079457105,
                                          'std': 1.1592139913}, None)
        # The Fast group has too few samples for window=3, so the algorithm
        # falls back to first differences and attaches a message.
        exp_fast_group = GroupResults('Fast', np.array([11.2291654905,
                                                        3.9163741156,
                                                        4.4943507388]),
                                      6.5466301150,
                                      {'mean': 6.5466301150,
                                       'std': 3.3194494926},
                                      "Cannot calculate the first difference "
                                      "with a window of size (3).")
        exp_treatment = CategoryResults('Treatment', 0.0103976830,
                                        [exp_control_group, exp_fast_group],
                                        None)
        exp = GradientANOVAResults('wdiff', False, [exp_treatment])
        self.assert_gradientANOVA_results_almost_equal(obs, exp)

    def test_get_trajectories_weighted(self):
        """Weighted windowed differences sorted by the Weight category."""
        wdv = WindowDifferenceGradientANOVA(
            self.coords, self.prop_expl, self.metadata_map, 3,
            trajectory_categories=['Treatment'], sort_category='Weight',
            weighted=True)
        obs = wdv.get_trajectories()
        exp_control_group = GroupResults('Control', np.array([-2.6759675112,
                                                              -0.2510321601,
                                                              -2.8322876639,
                                                              0.]),
                                         -1.4398218338,
                                         {'mean': -1.4398218338,
                                          'std': 1.31845790844}, None)
        exp_fast_group = GroupResults('Fast', np.array([9.6823682852,
                                                        2.9511115209,
                                                        5.2434091953]),
                                      5.9589630005,
                                      {'mean': 5.9589630005,
                                       'std': 2.7942163293},
                                      "Cannot calculate the first difference "
                                      "with a window of size (3).")
        exp_treatment = CategoryResults('Treatment', 0.0110675605,
                                        [exp_control_group, exp_fast_group],
                                        None)
        exp = GradientANOVAResults('wdiff', True, [exp_treatment])
        self.assert_gradientANOVA_results_almost_equal(obs, exp)
# Run the unittest entry point when executed as a script.
if __name__ == '__main__':
    main()
| SamStudio8/scikit-bio | skbio/stats/tests/test_gradient.py | Python | bsd-3-clause | 54,863 | [
"scikit-bio"
] | f08330d34327deaab47c070293b7bac81d8f767eea6fd37528aef5d85707e508 |
# Horn blast durations in milliseconds.  A pause of half the blast length
# separates consecutive blasts within a sequence step.
LONG_HORN = 750
LONG_HORN_SPACE = LONG_HORN/2
SHORT_HORN = 400
SHORT_HORN_SPACE = SHORT_HORN/2
# Start-sequence definitions: each maps a millisecond offset from sequence
# start to a string of blasts, 'L' for a long horn and 'S' for a short horn.
sequences = {
    'CreekFleet': {
        # Three long horns immediately (three minutes to start)
        0: 'LLL',
        # Two long horns a minute later (two minutes to start)
        60000: 'LL',
        # One short, three long horns 30s later (one minute 30s to start)
        90000: 'LSSS',
        # One long horn 30s later (one minute to start)
        120000: 'L',
        # Three short horns 30s later (30s to start)
        150000: 'SSS',
        160000: 'SS',  # Two short horns 10s later (20s to start)
        170000: 'S',   # One short horn 10s later (10s to start)
        175000: 'S',   # One short horn 5s later (5s to start)
        176000: 'S',   # One short horn 1s later (4s to start)
        177000: 'S',   # One short horn 1s later (3s to start)
        178000: 'S',   # One short horn 1s later (2s to start)
        179000: 'S',   # One short horn 1s later (1s to start)
        180000: 'L'    # One long horn 1s later (START!)
    },
    'ISAF': {
        # One short horn immediately (five minutes to start)
        0: 'S',
        # One short horn a minute later (four minutes to start)
        60000: 'S',
        # One long horn 3m later (one minute to start)
        240000: 'L',
        # One short horn 1m later (START!)
        300000: 'S'
    }
}


def build_horn_events(sequence):
    """Expand a blast sequence into parallel on/off event lists.

    Parameters
    ----------
    sequence : dict
        Maps a start offset in milliseconds to a string of 'L'/'S' blasts.

    Returns
    -------
    (times, commands)
        ``times[i]`` is the millisecond offset of event ``i``;
        ``commands[i]`` is True for horn-on and False for horn-off.
    """
    times = []
    commands = []
    for start, blasts in sequence.items():
        step_time = start
        for blast in blasts:
            times.append(step_time)
            commands.append(True)
            # The blast types are mutually exclusive, so chain with elif
            # (the original used two independent ifs).
            if blast == 'L':
                step_time += LONG_HORN
                times.append(step_time)
                commands.append(False)
                step_time += LONG_HORN_SPACE
            elif blast == 'S':
                step_time += SHORT_HORN
                times.append(step_time)
                commands.append(False)
                step_time += SHORT_HORN_SPACE
    return times, commands


def write_horn_header(path):
    """Write the C header containing the horn event tables for each option.

    NOTE: the generated ``*_NUM_HORNS`` constant counts on/off *events*
    (i.e. the table length), not the number of blasts, matching the
    firmware's expectation.
    """
    with open(path, 'w') as f:
        for option, sequence in sequences.items():
            print(option)
            horn_times, horn_commands = build_horn_events(sequence)
            f.write(
                f'uint8_t {option.upper()}_NUM_HORNS = {len(horn_times)};\n')
            f.write(f'uint64_t {option.upper()}_HORN_TIMES[] = {{\n')
            f.write(
                ',\n'.join([f'\t{t:.0f}' for t in horn_times])
                + '\n')
            f.write(f'}};\nbool {option.upper()}_HORN_COMMANDS[] = {{\n')
            f.write(
                ',\n'.join(
                    [f'\t{("false","true")[command]}'
                     for command in horn_commands])
                + '\n')
            f.write('};\n')
            print(list(zip(horn_times, horn_commands)))


if __name__ == '__main__':
    # Guarding the generation step lets the module be imported (e.g. for
    # testing) without writing the header or requiring the firmware tree.
    write_horn_header('firmware\\CreekFleet_Timer\\src\\horn.h')
| agmlego/Creekfleet_Timer | firmware/horn_sequence.py | Python | mit | 2,928 | [
"BLAST"
] | c03825103194a28a92a092cc821739a10667c62bf207ff66b734021cf7482917 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for templates module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.platform import test
class TransformerTest(test.TestCase):
  """Tests for transformer.Base: scope tracking, state, and error handling."""
  def _simple_context(self):
    # Build a transformer.Context around an empty EntityInfo; these tests
    # exercise tree traversal only, so no source/namespace data is needed.
    entity_info = transformer.EntityInfo(
        source_code=None,
        source_file=None,
        namespace=None,
        arg_values=None,
        arg_types=None,
        owner_type=None)
    return transformer.Context(entity_info)
  def test_entity_scope_tracking(self):
    """enclosing_entities reflects the stack of defs/classes/lambdas."""
    class TestTransformer(transformer.Base):
      # The choice of note to assign to is arbitrary. Using Assign because it's
      # easy to find in the tree.
      def visit_Assign(self, node):
        anno.setanno(node, 'enclosing_entities', self.enclosing_entities)
        return self.generic_visit(node)
      # This will show up in the lambda function.
      def visit_BinOp(self, node):
        anno.setanno(node, 'enclosing_entities', self.enclosing_entities)
        return self.generic_visit(node)
    tr = TestTransformer(self._simple_context())
    def test_function():
      a = 0
      class TestClass(object):
        def test_method(self):
          b = 0
          def inner_function(x):
            c = 0
            d = lambda y: (x + y)
            return c, d
          return b, inner_function
      return a, TestClass
    node, _ = parser.parse_entity(test_function)
    node = tr.visit(node)
    # Navigate the parsed AST down to the fixture's nested entities.
    test_function_node = node.body[0]
    test_class = test_function_node.body[1]
    test_method = test_class.body[0]
    inner_function = test_method.body[1]
    lambda_node = inner_function.body[1].value
    a = test_function_node.body[0]
    b = test_method.body[0]
    c = inner_function.body[0]
    lambda_expr = lambda_node.body
    self.assertEqual(
        (test_function_node,), anno.getanno(a, 'enclosing_entities'))
    self.assertEqual((test_function_node, test_class, test_method),
                     anno.getanno(b, 'enclosing_entities'))
    self.assertEqual(
        (test_function_node, test_class, test_method, inner_function),
        anno.getanno(c, 'enclosing_entities'))
    self.assertEqual((test_function_node, test_class, test_method,
                      inner_function, lambda_node),
                     anno.getanno(lambda_expr, 'enclosing_entities'))
  def assertSameAnno(self, first, second, key):
    # Identity (not equality) check: both nodes carry the same state object.
    self.assertIs(anno.getanno(first, key), anno.getanno(second, key))
  def assertDifferentAnno(self, first, second, key):
    self.assertIsNot(anno.getanno(first, key), anno.getanno(second, key))
  def test_state_tracking(self):
    """self.state[...] is scoped per enter/exit of loops and conditionals."""
    class LoopState(object):
      pass
    class CondState(object):
      pass
    class TestTransformer(transformer.Base):
      def visit(self, node):
        anno.setanno(node, 'loop_state', self.state[LoopState].value)
        anno.setanno(node, 'cond_state', self.state[CondState].value)
        return super(TestTransformer, self).visit(node)
      def visit_While(self, node):
        self.state[LoopState].enter()
        node = self.generic_visit(node)
        self.state[LoopState].exit()
        return node
      def visit_If(self, node):
        self.state[CondState].enter()
        node = self.generic_visit(node)
        self.state[CondState].exit()
        return node
    tr = TestTransformer(self._simple_context())
    def test_function(a):
      a = 1
      while a:
        _ = 'a'
        if a > 2:
          _ = 'b'
          while True:
            raise '1'
        if a > 3:
          _ = 'c'
          while True:
            raise '1'
    node, _ = parser.parse_entity(test_function)
    node = tr.visit(node)
    fn_body = node.body[0].body
    outer_while_body = fn_body[1].body
    self.assertSameAnno(fn_body[0], outer_while_body[0], 'cond_state')
    self.assertDifferentAnno(fn_body[0], outer_while_body[0], 'loop_state')
    first_if_body = outer_while_body[1].body
    self.assertDifferentAnno(outer_while_body[0], first_if_body[0],
                             'cond_state')
    self.assertSameAnno(outer_while_body[0], first_if_body[0], 'loop_state')
    first_inner_while_body = first_if_body[1].body
    self.assertSameAnno(first_if_body[0], first_inner_while_body[0],
                        'cond_state')
    self.assertDifferentAnno(first_if_body[0], first_inner_while_body[0],
                             'loop_state')
    second_if_body = outer_while_body[2].body
    self.assertDifferentAnno(first_if_body[0], second_if_body[0], 'cond_state')
    self.assertSameAnno(first_if_body[0], second_if_body[0], 'loop_state')
    second_inner_while_body = second_if_body[1].body
    self.assertDifferentAnno(first_inner_while_body[0],
                             second_inner_while_body[0], 'cond_state')
    self.assertDifferentAnno(first_inner_while_body[0],
                             second_inner_while_body[0], 'loop_state')
  def test_local_scope_info_stack(self):
    """Local scope values accumulate inside one scope and do not leak out."""
    class TestTransformer(transformer.Base):
      # Extract all string constants from the block.
      def visit_Str(self, node):
        self.set_local('string', self.get_local('string', default='') + node.s)
        return self.generic_visit(node)
      def _annotate_result(self, node):
        self.enter_local_scope()
        node = self.generic_visit(node)
        anno.setanno(node, 'test', self.get_local('string'))
        self.exit_local_scope()
        return node
      def visit_While(self, node):
        return self._annotate_result(node)
      def visit_For(self, node):
        return self._annotate_result(node)
    tr = TestTransformer(self._simple_context())
    def test_function(a):
      """Docstring."""
      assert a == 'This should not be counted'
      for i in range(3):
        _ = 'a'
        if i > 2:
          return 'b'
        else:
          _ = 'c'
          while True:
            raise '1'
      return 'nor this'
    node, _ = parser.parse_entity(test_function)
    node = tr.visit(node)
    for_node = node.body[0].body[2]
    while_node = for_node.body[1].orelse[1]
    self.assertFalse(anno.hasanno(for_node, 'string'))
    self.assertEqual('abc', anno.getanno(for_node, 'test'))
    self.assertFalse(anno.hasanno(while_node, 'string'))
    self.assertEqual('1', anno.getanno(while_node, 'test'))
  def test_local_scope_info_stack_checks_integrity(self):
    """Unbalanced enter/exit of local scopes raises AssertionError."""
    class TestTransformer(transformer.Base):
      def visit_If(self, node):
        self.enter_local_scope()
        return self.generic_visit(node)
      def visit_For(self, node):
        node = self.generic_visit(node)
        self.exit_local_scope()
        return node
    tr = TestTransformer(self._simple_context())
    def no_exit(a):
      if a > 0:
        print(a)
      return None
    node, _ = parser.parse_entity(no_exit)
    with self.assertRaises(AssertionError):
      tr.visit(node)
    def no_entry(a):
      for _ in a:
        print(a)
    node, _ = parser.parse_entity(no_entry)
    with self.assertRaises(AssertionError):
      tr.visit(node)
  def test_visit_block_postprocessing(self):
    """visit_block's after_visit hook can replace a node and relocate a tail."""
    class TestTransformer(transformer.Base):
      def _process_body_item(self, node):
        if isinstance(node, gast.Assign) and (node.value.id == 'y'):
          if_node = gast.If(gast.Name('x', gast.Load(), None), [node], [])
          return if_node, if_node.body
        return node, None
      def visit_FunctionDef(self, node):
        node.body = self.visit_block(
            node.body, after_visit=self._process_body_item)
        return node
    def test_function(x, y):
      z = x
      z = y
      return z
    tr = TestTransformer(self._simple_context())
    node, _ = parser.parse_entity(test_function)
    node = tr.visit(node)
    node = node.body[0]
    # The 'z = y' assignment gets wrapped in an If, and the trailing return
    # is moved into the If body.
    self.assertEqual(len(node.body), 2)
    self.assertTrue(isinstance(node.body[0], gast.Assign))
    self.assertTrue(isinstance(node.body[1], gast.If))
    self.assertTrue(isinstance(node.body[1].body[0], gast.Assign))
    self.assertTrue(isinstance(node.body[1].body[1], gast.Return))
  def test_robust_error_on_list_visit(self):
    """Calling visit on a list inside a visitor blames the offending node."""
    class BrokenTransformer(transformer.Base):
      def visit_If(self, node):
        # This is broken because visit expects a single node, not a list, and
        # the body of an if is a list.
        # Importantly, the default error handling in visit also expects a single
        # node. Therefore, mistakes like this need to trigger a type error
        # before the visit called here installs its error handler.
        # That type error can then be caught by the enclosing call to visit,
        # and correctly blame the If node.
        self.visit(node.body)
        return node
    def test_function(x):
      if x > 0:
        return x
    tr = BrokenTransformer(self._simple_context())
    node, _ = parser.parse_entity(test_function)
    with self.assertRaises(ValueError) as cm:
      node = tr.visit(node)
    obtained_message = str(cm.exception)
    expected_message = r'expected "ast.AST", got "\<(type|class) \'list\'\>"'
    self.assertRegexpMatches(obtained_message, expected_message)
  def test_robust_error_on_ast_corruption(self):
    """Error handling survives AST corruption and reports the original error."""
    # A child class should not be able to be so broken that it causes the error
    # handling in `transformer.Base` to raise an exception. Why not? Because
    # then the original error location is dropped, and an error handler higher
    # up in the call stack gives misleading information.
    # Here we test that the error handling in `visit` completes, and blames the
    # correct original exception, even if the AST gets corrupted.
    class NotANode(object):
      pass
    class BrokenTransformer(transformer.Base):
      def visit_If(self, node):
        node.body = NotANode()
        raise ValueError('I blew up')
    def test_function(x):
      if x > 0:
        return x
    tr = BrokenTransformer(self._simple_context())
    node, _ = parser.parse_entity(test_function)
    with self.assertRaises(ValueError) as cm:
      node = tr.visit(node)
    obtained_message = str(cm.exception)
    # The message should reference the exception actually raised, not anything
    # from the exception handler.
    expected_substring = 'I blew up'
    self.assertTrue(expected_substring in obtained_message, obtained_message)
# Run the TensorFlow test entry point when executed as a script.
if __name__ == '__main__':
  test.main()
| theflofly/tensorflow | tensorflow/python/autograph/pyct/transformer_test.py | Python | apache-2.0 | 11,209 | [
"VisIt"
] | 120c441558d1acd619f7d9d357554c2c04d1eaba30d9c2eab353f91230c0fc15 |
# -*- coding: utf-8 -*-
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
import warnings
from collections.abc import Iterable
from functools import wraps
import ctypes
import numpy as np
from scipy._lib.doccer import (extend_notes_in_docstring,
replace_notes_in_docstring)
from scipy._lib._ccallback import LowLevelCallable
from scipy import optimize
from scipy import integrate
import scipy.special as sc
import scipy.special._ufuncs as scu
from scipy._lib._util import _lazyselect, _lazywhere
from . import _stats
from ._tukeylambda_stats import (tukeylambda_variance as _tlvar,
tukeylambda_kurtosis as _tlkurt)
from ._distn_infrastructure import (
get_distribution_names, _kurtosis, _ncx2_cdf, _ncx2_log_pdf, _ncx2_pdf,
rv_continuous, _skew, _get_fixed_fit_value, _check_shape)
from ._ksstats import kolmogn, kolmognp, kolmogni
from ._constants import (_XMIN, _EULER, _ZETA3,
_SQRT_2_OVER_PI, _LOG_SQRT_2_OVER_PI)
import scipy.stats._boost as _boost
def _remove_optimizer_parameters(kwds):
"""
Remove the optimizer-related keyword arguments 'loc', 'scale' and
'optimizer' from `kwds`. Then check that `kwds` is empty, and
raise `TypeError("Unknown arguments: %s." % kwds)` if it is not.
This function is used in the fit method of distributions that override
the default method and do not use the default optimization code.
`kwds` is modified in-place.
"""
kwds.pop('loc', None)
kwds.pop('scale', None)
kwds.pop('optimizer', None)
kwds.pop('method', None)
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
def _call_super_mom(fun):
    # Decorator for `fit` methods that implement only MLE: when the caller
    # requests method-of-moments ('mm'), fall back to the generic
    # rv_continuous implementation instead of the specialized one.
    # if fit method is overridden only for MLE and doesn't specify what to do
    # if method == 'mm', this decorator calls generic implementation
    @wraps(fun)
    def wrapper(self, *args, **kwds):
        # 'mle' is the documented default when no method is given.
        method = kwds.get('method', 'mle').lower()
        if method == 'mm':
            # NOTE(review): super(type(self), self) dispatches relative to the
            # *runtime* type; safe here because these gen classes are leaves.
            return super(type(self), self).fit(*args, **kwds)
        else:
            return fun(self, *args, **kwds)
    return wrapper
class ksone_gen(rv_continuous):
    r"""Kolmogorov-Smirnov one-sided test statistic distribution.
    This is the distribution of the one-sided Kolmogorov-Smirnov (KS)
    statistics :math:`D_n^+` and :math:`D_n^-`
    for a finite sample size ``n`` (the shape parameter).
    %(before_notes)s
    See Also
    --------
    kstwobign, kstwo, kstest
    Notes
    -----
    :math:`D_n^+` and :math:`D_n^-` are given by
    .. math::
        D_n^+ &= \text{sup}_x (F_n(x) - F(x)),\\
        D_n^- &= \text{sup}_x (F(x) - F_n(x)),\\
    where :math:`F` is a continuous CDF and :math:`F_n` is an empirical CDF.
    `ksone` describes the distribution under the null hypothesis of the KS test
    that the empirical CDF corresponds to :math:`n` i.i.d. random variates
    with CDF :math:`F`.
    %(after_notes)s
    References
    ----------
    .. [1] Birnbaum, Z. W. and Tingey, F.H. "One-sided confidence contours
       for probability distribution functions", The Annals of Mathematical
       Statistics, 22(4), pp 592-596 (1951).
    %(example)s
    """
    # Implementation delegates to private ufuncs in scipy.special._ufuncs
    # (`scu`) and the public sc.smirnov/sc.smirnovi.
    def _pdf(self, x, n):
        # NOTE(review): scu._smirnovp appears to be d/dx of the Smirnov SF,
        # hence the negation yields the density — confirm against special.
        return -scu._smirnovp(n, x)
    def _cdf(self, x, n):
        # Complemented Smirnov SF, i.e. the CDF computed without 1 - sf.
        return scu._smirnovc(n, x)
    def _sf(self, x, n):
        return sc.smirnov(n, x)
    def _ppf(self, q, n):
        # Inverse of the complemented SF (inverse CDF).
        return scu._smirnovci(n, q)
    def _isf(self, q, n):
        return sc.smirnovi(n, q)
ksone = ksone_gen(a=0.0, b=1.0, name='ksone')
class kstwo_gen(rv_continuous):
    r"""Kolmogorov-Smirnov two-sided test statistic distribution.
    This is the distribution of the two-sided Kolmogorov-Smirnov (KS)
    statistic :math:`D_n` for a finite sample size ``n``
    (the shape parameter).
    %(before_notes)s
    See Also
    --------
    kstwobign, ksone, kstest
    Notes
    -----
    :math:`D_n` is given by
    .. math::
        D_n = \text{sup}_x |F_n(x) - F(x)|
    where :math:`F` is a (continuous) CDF and :math:`F_n` is an empirical CDF.
    `kstwo` describes the distribution under the null hypothesis of the KS test
    that the empirical CDF corresponds to :math:`n` i.i.d. random variates
    with CDF :math:`F`.
    %(after_notes)s
    References
    ----------
    .. [1] Simard, R., L'Ecuyer, P. "Computing the Two-Sided
       Kolmogorov-Smirnov Distribution",  Journal of Statistical Software,
       Vol 39, 11, 1-18 (2011).
    %(example)s
    """
    def _get_support(self, n):
        # The statistic is bounded below by 1/(2n); support depends on the
        # shape parameter, so it is computed per `n` (scalar or array).
        return (0.5/(n if not isinstance(n, Iterable) else np.asanyarray(n)),
                1.0)
    # All numerics are delegated to the _ksstats module (kolmogn and its
    # derivative/inverse companions).
    def _pdf(self, x, n):
        return kolmognp(n, x)
    def _cdf(self, x, n):
        return kolmogn(n, x)
    def _sf(self, x, n):
        return kolmogn(n, x, cdf=False)
    def _ppf(self, q, n):
        return kolmogni(n, q, cdf=True)
    def _isf(self, q, n):
        return kolmogni(n, q, cdf=False)
# Use the pdf, (not the ppf) to compute moments
kstwo = kstwo_gen(momtype=0, a=0.0, b=1.0, name='kstwo')
class kstwobign_gen(rv_continuous):
    r"""Limiting distribution of scaled Kolmogorov-Smirnov two-sided test statistic.
    This is the asymptotic distribution of the two-sided Kolmogorov-Smirnov
    statistic :math:`\sqrt{n} D_n` that measures the maximum absolute
    distance of the theoretical (continuous) CDF from the empirical CDF.
    (see `kstest`).
    %(before_notes)s
    See Also
    --------
    ksone, kstwo, kstest
    Notes
    -----
    :math:`\sqrt{n} D_n` is given by
    .. math::
        D_n = \text{sup}_x |F_n(x) - F(x)|
    where :math:`F` is a continuous CDF and :math:`F_n` is an empirical CDF.
    `kstwobign`  describes the asymptotic distribution (i.e. the limit of
    :math:`\sqrt{n} D_n`) under the null hypothesis of the KS test that the
    empirical CDF corresponds to i.i.d. random variates with CDF :math:`F`.
    %(after_notes)s
    References
    ----------
    .. [1] Feller, W. "On the Kolmogorov-Smirnov Limit Theorems for Empirical
       Distributions",  Ann. Math. Statist. Vol 19, 177-189 (1948).
    %(example)s
    """
    # Delegates to the Kolmogorov limit-distribution ufuncs in
    # scipy.special (public) and scipy.special._ufuncs (private `scu`).
    def _pdf(self, x):
        # NOTE(review): scu._kolmogp appears to be d/dx of sc.kolmogorov
        # (the SF), hence the negation — confirm against special.
        return -scu._kolmogp(x)
    def _cdf(self, x):
        return scu._kolmogc(x)
    def _sf(self, x):
        return sc.kolmogorov(x)
    def _ppf(self, q):
        return scu._kolmogci(q)
    def _isf(self, q):
        return sc.kolmogi(q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
# Normalization constant sqrt(2*pi) and its log, precomputed once.
_norm_pdf_C = np.sqrt(2*np.pi)
_norm_pdf_logC = np.log(_norm_pdf_C)
def _norm_pdf(x):
    """Standard normal density."""
    return np.exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
    """Log of the standard normal density."""
    return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
    # sc.ndtr is the C-implemented standard normal CDF.
    return sc.ndtr(x)
def _norm_logcdf(x):
    return sc.log_ndtr(x)
def _norm_ppf(q):
    # sc.ndtri is the inverse of sc.ndtr.
    return sc.ndtri(q)
def _norm_sf(x):
    # Survival function via symmetry: sf(x) = cdf(-x).
    return _norm_cdf(-x)
def _norm_logsf(x):
    return _norm_logcdf(-x)
def _norm_isf(q):
    # Inverse survival function via symmetry: isf(q) = -ppf(q).
    return -_norm_ppf(q)
class norm_gen(rv_continuous):
    r"""A normal continuous random variable.
    The location (``loc``) keyword specifies the mean.
    The scale (``scale``) keyword specifies the standard deviation.
    %(before_notes)s
    Notes
    -----
    The probability density function for `norm` is:
    .. math::
        f(x) = \frac{\exp(-x^2/2)}{\sqrt{2\pi}}
    for a real number :math:`x`.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, size=None, random_state=None):
        return random_state.standard_normal(size)
    def _pdf(self, x):
        # norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
        return _norm_pdf(x)
    def _logpdf(self, x):
        return _norm_logpdf(x)
    def _cdf(self, x):
        return _norm_cdf(x)
    def _logcdf(self, x):
        return _norm_logcdf(x)
    def _sf(self, x):
        return _norm_sf(x)
    def _logsf(self, x):
        return _norm_logsf(x)
    def _ppf(self, q):
        return _norm_ppf(q)
    def _isf(self, q):
        return _norm_isf(q)
    def _stats(self):
        # mean, variance, skewness, excess kurtosis of the standard normal.
        return 0.0, 1.0, 0.0, 0.0
    def _entropy(self):
        # Differential entropy 0.5*log(2*pi*e).
        return 0.5*(np.log(2*np.pi)+1)
    @_call_super_mom
    @replace_notes_in_docstring(rv_continuous, notes="""\
        For the normal distribution, method of moments and maximum likelihood
        estimation give identical fits, and explicit formulas for the estimates
        are available.
        This function uses these explicit formulas for the maximum likelihood
        estimation of the normal distribution parameters, so the
        `optimizer` and `method` arguments are ignored.\n\n""")
    def fit(self, data, **kwds):
        # Closed-form MLE: sample mean and (biased) sample std deviation,
        # honoring fixed `floc`/`fscale` when supplied.
        floc = kwds.pop('floc', None)
        fscale = kwds.pop('fscale', None)
        _remove_optimizer_parameters(kwds)
        if floc is not None and fscale is not None:
            # This check is for consistency with `rv_continuous.fit`.
            # Without this check, this function would just return the
            # parameters that were given.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")
        data = np.asarray(data)
        if not np.isfinite(data).all():
            raise RuntimeError("The data contains non-finite values.")
        if floc is None:
            loc = data.mean()
        else:
            loc = floc
        if fscale is None:
            # Note: std is computed about `loc`, which may be the fixed floc.
            scale = np.sqrt(((data - loc)**2).mean())
        else:
            scale = fscale
        return loc, scale
    def _munp(self, n):
        """
        @returns Moments of standard normal distribution for integer n >= 0
        See eq. 16 of https://arxiv.org/abs/1209.4340v2
        """
        # Even moments are (n-1)!!; odd moments vanish by symmetry.
        if n % 2 == 0:
            return sc.factorial2(n - 1)
        else:
            return 0.
norm = norm_gen(name='norm')
class alpha_gen(rv_continuous):
    r"""An alpha continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `alpha` ([1]_, [2]_) is:
    .. math::
        f(x, a) = \frac{1}{x^2 \Phi(a) \sqrt{2\pi}} *
                  \exp(-\frac{1}{2} (a-1/x)^2)
    where :math:`\Phi` is the normal CDF, :math:`x > 0`, and :math:`a > 0`.
    `alpha` takes ``a`` as a shape parameter.
    %(after_notes)s
    References
    ----------
    .. [1] Johnson, Kotz, and Balakrishnan, "Continuous Univariate
           Distributions, Volume 1", Second Edition, John Wiley and Sons,
           p. 173 (1994).
    .. [2] Anthony A. Salvia, "Reliability applications of the Alpha
           Distribution", IEEE Transactions on Reliability, Vol. R-34,
           No. 3, pp. 251-252 (1985).
    %(example)s
    """
    # Support is the open interval (0, inf): the density involves 1/x.
    _support_mask = rv_continuous._open_support_mask
    def _pdf(self, x, a):
        # alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2)
        return 1.0/(x**2)/_norm_cdf(a)*_norm_pdf(a-1.0/x)
    def _logpdf(self, x, a):
        return -2*np.log(x) + _norm_logpdf(a-1.0/x) - np.log(_norm_cdf(a))
    def _cdf(self, x, a):
        return _norm_cdf(a-1.0/x) / _norm_cdf(a)
    def _ppf(self, q, a):
        # Invert the CDF analytically via the normal quantile function.
        return 1.0/np.asarray(a-sc.ndtri(q*_norm_cdf(a)))
    def _stats(self, a):
        # Mean and variance are infinite; higher moments are undefined.
        return [np.inf]*2 + [np.nan]*2
alpha = alpha_gen(a=0.0, name='alpha')
class anglit_gen(rv_continuous):
    r"""An anglit continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `anglit` is:
    .. math::
        f(x) = \sin(2x + \pi/2) = \cos(2x)
    for :math:`-\pi/4 \le x \le \pi/4`.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x):
        # anglit.pdf(x) = sin(2*x + \pi/2) = cos(2*x)
        return np.cos(2*x)
    def _cdf(self, x):
        # Integral of cos(2x) shifted to the support [-pi/4, pi/4].
        return np.sin(x+np.pi/4)**2.0
    def _ppf(self, q):
        return np.arcsin(np.sqrt(q))-np.pi/4
    def _stats(self):
        # mean, variance, skewness, excess kurtosis (closed forms).
        return 0.0, np.pi*np.pi/16-0.5, 0.0, -2*(np.pi**4 - 96)/(np.pi*np.pi-8)**2
    def _entropy(self):
        return 1-np.log(2)
anglit = anglit_gen(a=-np.pi/4, b=np.pi/4, name='anglit')
class arcsine_gen(rv_continuous):
    r"""An arcsine continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `arcsine` is:
    .. math::
        f(x) = \frac{1}{\pi \sqrt{x (1-x)}}
    for :math:`0 < x < 1`.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x):
        # arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
        # The density diverges at the endpoints; suppress the expected
        # divide-by-zero warnings there.
        with np.errstate(divide='ignore'):
            return 1.0/np.pi/np.sqrt(x*(1-x))
    def _cdf(self, x):
        return 2.0/np.pi*np.arcsin(np.sqrt(x))
    def _ppf(self, q):
        return np.sin(np.pi/2.0*q)**2.0
    def _stats(self):
        # Closed-form moments of the standard arcsine distribution.
        mu = 0.5
        mu2 = 1.0/8
        g1 = 0
        g2 = -3.0/2.0
        return mu, mu2, g1, g2
    def _entropy(self):
        # Differential entropy, log(pi/4), precomputed as a constant.
        return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
class FitDataError(ValueError):
    """Raised when `data` values fall outside a fixed fitting support.

    Used by, for example, beta_gen.fit when both floc and fscale are fixed
    and some values in the data lie outside the open interval
    (floc, floc + fscale).
    """
    def __init__(self, distr, lower, upper):
        message = ("Invalid values in `data`. Maximum likelihood "
                   "estimation with {distr!r} requires that {lower!r} < "
                   "(x - loc)/scale < {upper!r} for each x in `data`.".format(
                       distr=distr, lower=lower, upper=upper))
        self.args = (message,)
class FitSolverError(RuntimeError):
    """Raised when the MLE equation solver fails to converge.

    Used by, for example, beta_gen.fit when optimize.fsolve returns with
    ier != 1; `mesg` is fsolve's diagnostic message.
    """
    def __init__(self, mesg):
        detail = mesg.replace('\n', '')
        self.args = (
            "Solver for the MLE equations failed to converge: " + detail,)
def _beta_mle_a(a, b, n, s1):
# The zeros of this function give the MLE for `a`, with
# `b`, `n` and `s1` given. `s1` is the sum of the logs of
# the data. `n` is the number of data points.
psiab = sc.psi(a + b)
func = s1 - n * (-psiab + sc.psi(a))
return func
def _beta_mle_ab(theta, n, s1, s2):
# Zeros of this function are critical points of
# the maximum likelihood function. Solving this system
# for theta (which contains a and b) gives the MLE for a and b
# given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data,
# and `s2` is the sum of the logs of 1 - data. `n` is the number
# of data points.
a, b = theta
psiab = sc.psi(a + b)
func = [s1 - n * (-psiab + sc.psi(a)),
s2 - n * (-psiab + sc.psi(b))]
return func
class beta_gen(rv_continuous):
    r"""A beta continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `beta` is:
    .. math::
        f(x, a, b) = \frac{\Gamma(a+b) x^{a-1} (1-x)^{b-1}}
                          {\Gamma(a) \Gamma(b)}
    for :math:`0 <= x <= 1`, :math:`a > 0`, :math:`b > 0`, where
    :math:`\Gamma` is the gamma function (`scipy.special.gamma`).
    `beta` takes :math:`a` and :math:`b` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, a, b, size=None, random_state=None):
        return random_state.beta(a, b, size)
    def _pdf(self, x, a, b):
        #                     gamma(a+b) * x**(a-1) * (1-x)**(b-1)
        # beta.pdf(x, a, b) = ------------------------------------
        #                              gamma(a)*gamma(b)
        return _boost._beta_pdf(x, a, b)
    def _logpdf(self, x, a, b):
        # xlog1py/xlogy handle the x == 0 and x == 1 endpoints gracefully.
        lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
        lPx -= sc.betaln(a, b)
        return lPx
    def _cdf(self, x, a, b):
        return _boost._beta_cdf(x, a, b)
    def _sf(self, x, a, b):
        return _boost._beta_sf(x, a, b)
    def _isf(self, x, a, b):
        with warnings.catch_warnings():
            # See gh-14901
            message = "overflow encountered in _beta_isf"
            warnings.filterwarnings('ignore', message=message)
            return _boost._beta_isf(x, a, b)
    def _ppf(self, q, a, b):
        with warnings.catch_warnings():
            message = "overflow encountered in _beta_ppf"
            warnings.filterwarnings('ignore', message=message)
            return _boost._beta_ppf(q, a, b)
    def _stats(self, a, b):
        # mean, variance, skewness, excess kurtosis via Boost.
        return(
            _boost._beta_mean(a, b),
            _boost._beta_variance(a, b),
            _boost._beta_skewness(a, b),
            _boost._beta_kurtosis_excess(a, b))
    def _fitstart(self, data):
        # Seed the generic fit with shapes matched to the sample skewness
        # and kurtosis (method-of-moments on the standardized moments).
        g1 = _skew(data)
        g2 = _kurtosis(data)
        def func(x):
            a, b = x
            sk = 2*(b-a)*np.sqrt(a + b + 1) / (a + b + 2) / np.sqrt(a*b)
            ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
            ku /= a*b*(a+b+2)*(a+b+3)
            ku *= 6
            return [sk-g1, ku-g2]
        a, b = optimize.fsolve(func, (1.0, 1.0))
        return super()._fitstart(data, args=(a, b))
    @_call_super_mom
    @extend_notes_in_docstring(rv_continuous, notes="""\
        In the special case where `method="MLE"` and
        both `floc` and `fscale` are given, a
        `ValueError` is raised if any value `x` in `data` does not satisfy
        `floc < x < floc + fscale`.\n\n""")
    def fit(self, data, *args, **kwds):
        # Override rv_continuous.fit, so we can more efficiently handle the
        # case where floc and fscale are given.
        floc = kwds.get('floc', None)
        fscale = kwds.get('fscale', None)
        if floc is None or fscale is None:
            # do general fit
            return super().fit(data, *args, **kwds)
        # We already got these from kwds, so just pop them.
        kwds.pop('floc', None)
        kwds.pop('fscale', None)
        f0 = _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
        f1 = _get_fixed_fit_value(kwds, ['f1', 'fb', 'fix_b'])
        _remove_optimizer_parameters(kwds)
        if f0 is not None and f1 is not None:
            # This check is for consistency with `rv_continuous.fit`.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")
        # Special case: loc and scale are constrained, so we are fitting
        # just the shape parameters.  This can be done much more efficiently
        # than the method used in `rv_continuous.fit`.  (See the subsection
        # "Two unknown parameters" in the section "Maximum likelihood" of
        # the Wikipedia article on the Beta distribution for the formulas.)
        if not np.isfinite(data).all():
            raise RuntimeError("The data contains non-finite values.")
        # Normalize the data to the interval [0, 1].
        data = (np.ravel(data) - floc) / fscale
        if np.any(data <= 0) or np.any(data >= 1):
            raise FitDataError("beta", lower=floc, upper=floc + fscale)
        xbar = data.mean()
        if f0 is not None or f1 is not None:
            # One of the shape parameters is fixed.
            if f0 is not None:
                # The shape parameter a is fixed, so swap the parameters
                # and flip the data.  We always solve for `a`.  The result
                # will be swapped back before returning.
                b = f0
                data = 1 - data
                xbar = 1 - xbar
            else:
                b = f1
            # Initial guess for a.  Use the formula for the mean of the beta
            # distribution, E[x] = a / (a + b), to generate a reasonable
            # starting point based on the mean of the data and the given
            # value of b.
            a = b * xbar / (1 - xbar)
            # Compute the MLE for `a` by solving _beta_mle_a.
            theta, info, ier, mesg = optimize.fsolve(
                _beta_mle_a, a,
                args=(b, len(data), np.log(data).sum()),
                full_output=True
            )
            if ier != 1:
                raise FitSolverError(mesg=mesg)
            a = theta[0]
            if f0 is not None:
                # The shape parameter a was fixed, so swap back the
                # parameters.
                a, b = b, a
        else:
            # Neither of the shape parameters is fixed.
            # s1 and s2 are used in the extra arguments passed to _beta_mle_ab
            # by optimize.fsolve.
            s1 = np.log(data).sum()
            s2 = sc.log1p(-data).sum()
            # Use the "method of moments" to estimate the initial
            # guess for a and b.
            fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
            a = xbar * fac
            b = (1 - xbar) * fac
            # Compute the MLE for a and b by solving _beta_mle_ab.
            theta, info, ier, mesg = optimize.fsolve(
                _beta_mle_ab, [a, b],
                args=(len(data), s1, s2),
                full_output=True
            )
            if ier != 1:
                raise FitSolverError(mesg=mesg)
            a, b = theta
        return a, b, floc, fscale
beta = beta_gen(a=0.0, b=1.0, name='beta')
class betaprime_gen(rv_continuous):
    r"""A beta prime continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `betaprime` is:
    .. math::
        f(x, a, b) = \frac{x^{a-1} (1+x)^{-a-b}}{\beta(a, b)}
    for :math:`x >= 0`, :math:`a > 0`, :math:`b > 0`, where
    :math:`\beta(a, b)` is the beta function (see `scipy.special.beta`).
    `betaprime` takes ``a`` and ``b`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    _support_mask = rv_continuous._open_support_mask
    def _rvs(self, a, b, size=None, random_state=None):
        # The ratio of two independent gamma variates Gamma(a)/Gamma(b)
        # has a beta-prime(a, b) distribution.
        u1 = gamma.rvs(a, size=size, random_state=random_state)
        u2 = gamma.rvs(b, size=size, random_state=random_state)
        return u1 / u2
    def _pdf(self, x, a, b):
        # betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)
        return np.exp(self._logpdf(x, a, b))
    def _logpdf(self, x, a, b):
        # xlogy/xlog1py keep the x == 0 endpoint well-defined.
        return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b)
    def _cdf(self, x, a, b):
        # Via the regularized incomplete beta function under x -> x/(1+x).
        return sc.betainc(a, b, x/(1.+x))
    def _munp(self, n, a, b):
        # Closed-form raw moments; the n-th moment exists only for b > n.
        if n == 1.0:
            return np.where(b > 1,
                            a/(b-1.0),
                            np.inf)
        elif n == 2.0:
            return np.where(b > 2,
                            a*(a+1.0)/((b-2.0)*(b-1.0)),
                            np.inf)
        elif n == 3.0:
            return np.where(b > 3,
                            a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
                            np.inf)
        elif n == 4.0:
            return np.where(b > 4,
                            (a*(a + 1.0)*(a + 2.0)*(a + 3.0) /
                             ((b - 4.0)*(b - 3.0)*(b - 2.0)*(b - 1.0))),
                            np.inf)
        else:
            raise NotImplementedError
betaprime = betaprime_gen(a=0.0, name='betaprime')
class bradford_gen(rv_continuous):
    r"""A Bradford continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `bradford` is:
    .. math::
        f(x, c) = \frac{c}{\log(1+c) (1+cx)}
    for :math:`0 <= x <= 1` and :math:`c > 0`.
    `bradford` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, c):
        # bradford.pdf(x, c) = c / (k * (1+c*x)), where k = log(1 + c).
        return c / (c*x + 1.0) / sc.log1p(c)
    def _cdf(self, x, c):
        return sc.log1p(c*x) / sc.log1p(c)
    def _ppf(self, q, c):
        # Analytic inverse of the CDF via expm1/log1p for accuracy.
        return sc.expm1(q * sc.log1p(c)) / c
    def _stats(self, c, moments='mv'):
        # Closed-form moments with k = log(1 + c); skewness/kurtosis are
        # computed only when requested via the `moments` flags.
        k = np.log(1.0+c)
        mu = (c-k)/(c*k)
        mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
        g1 = None
        g2 = None
        if 's' in moments:
            g1 = np.sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
            g1 /= np.sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
        if 'k' in moments:
            g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) +
                  6*c*k*k*(3*k-14) + 12*k**3)
            g2 /= 3*c*(c*(k-2)+2*k)**2
        return mu, mu2, g1, g2
    def _entropy(self, c):
        k = np.log(1+c)
        return k/2.0 - np.log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
class burr_gen(rv_continuous):
    r"""A Burr (Type III) continuous random variable.
    %(before_notes)s
    See Also
    --------
    fisk : a special case of either `burr` or `burr12` with ``d=1``
    burr12 : Burr Type XII distribution
    mielke : Mielke Beta-Kappa / Dagum distribution
    Notes
    -----
    The probability density function for `burr` is:
    .. math::
        f(x, c, d) = c d x^{-c - 1} / (1 + x^{-c})^{d + 1}
    for :math:`x >= 0` and :math:`c, d > 0`.
    `burr` takes :math:`c` and :math:`d` as shape parameters.
    This is the PDF corresponding to the third CDF given in Burr's list;
    specifically, it is equation (11) in Burr's paper [1]_. The distribution
    is also commonly referred to as the Dagum distribution [2]_. If the
    parameter :math:`c < 1` then the mean of the distribution does not
    exist and if :math:`c < 2` the variance does not exist [2]_.
    The PDF is finite at the left endpoint :math:`x = 0` if :math:`c * d >= 1`.
    %(after_notes)s
    References
    ----------
    .. [1] Burr, I. W. "Cumulative frequency functions", Annals of
       Mathematical Statistics, 13(2), pp 215-232 (1942).
    .. [2] https://en.wikipedia.org/wiki/Dagum_distribution
    .. [3] Kleiber, Christian. "A guide to the Dagum distributions."
       Modeling Income Distributions and Lorenz Curves pp 97-117 (2008).
    %(example)s
    """
    # Do not set _support_mask to rv_continuous._open_support_mask
    # Whether the left-hand endpoint is suitable for pdf evaluation is dependent
    # on the values of c and d: if c*d >= 1, the pdf is finite, otherwise infinite.

    def _pdf(self, x, c, d):
        # burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
        # At x == 0 the generic expression would evaluate 0**(negative);
        # use an algebraically equivalent form that is well defined there.
        output = _lazywhere(
            x == 0, [x, c, d],
            lambda x_, c_, d_: c_ * d_ * (x_**(c_*d_-1)) / (1 + x_**c_),
            f2=lambda x_, c_, d_: (c_ * d_ * (x_ ** (-c_ - 1.0)) /
                                   ((1 + x_ ** (-c_)) ** (d_ + 1.0))))
        if output.ndim == 0:
            return output[()]
        return output

    def _logpdf(self, x, c, d):
        # Same x == 0 branching as _pdf, carried out in log space.
        output = _lazywhere(
            x == 0, [x, c, d],
            lambda x_, c_, d_: (np.log(c_) + np.log(d_) + sc.xlogy(c_*d_ - 1, x_)
                                - (d_+1) * sc.log1p(x_**(c_))),
            f2=lambda x_, c_, d_: (np.log(c_) + np.log(d_)
                                   + sc.xlogy(-c_ - 1, x_)
                                   - sc.xlog1py(d_+1, x_**(-c_))))
        if output.ndim == 0:
            return output[()]
        return output

    def _cdf(self, x, c, d):
        return (1 + x**(-c))**(-d)

    def _logcdf(self, x, c, d):
        return sc.log1p(x**(-c)) * (-d)

    def _sf(self, x, c, d):
        return np.exp(self._logsf(x, c, d))

    def _logsf(self, x, c, d):
        return np.log1p(- (1 + x**(-c))**(-d))

    def _ppf(self, q, c, d):
        return (q**(-1.0/d) - 1)**(-1.0/c)

    def _stats(self, c, d):
        # Raw moments e1..e4 via the Beta function; the k-th raw moment
        # exists only when c > k, hence the guards below.
        nc = np.arange(1, 5).reshape(4,1) / c
        # ek is the kth raw moment; e1 is the mean, e2-e1**2 the variance, etc.
        e1, e2, e3, e4 = sc.beta(d + nc, 1. - nc) * d
        mu = np.where(c > 1.0, e1, np.nan)
        mu2_if_c = e2 - mu**2
        mu2 = np.where(c > 2.0, mu2_if_c, np.nan)
        g1 = _lazywhere(
            c > 3.0,
            (c, e1, e2, e3, mu2_if_c),
            lambda c, e1, e2, e3, mu2_if_c: (e3 - 3*e2*e1 + 2*e1**3) / np.sqrt((mu2_if_c)**3),
            fillvalue=np.nan)
        g2 = _lazywhere(
            c > 4.0,
            (c, e1, e2, e3, e4, mu2_if_c),
            lambda c, e1, e2, e3, e4, mu2_if_c: (
                ((e4 - 4*e3*e1 + 6*e2*e1**2 - 3*e1**4) / mu2_if_c**2) - 3),
            fillvalue=np.nan)
        if np.ndim(c) == 0:
            # Scalar shape parameters: unwrap the 0-d arrays.
            return mu.item(), mu2.item(), g1.item(), g2.item()
        return mu, mu2, g1, g2

    def _munp(self, n, c, d):
        def __munp(n, c, d):
            nc = 1. * n / c
            return d * sc.beta(1.0 - nc, d + nc)
        n, c, d = np.asarray(n), np.asarray(c), np.asarray(d)
        # The n-th moment exists only for c > n; the (n == n) & (d == d)
        # terms propagate NaN shape parameters into the fill value.
        return _lazywhere((c > n) & (n == n) & (d == d), (c, d, n),
                          lambda c, d, n: __munp(n, c, d),
                          np.nan)


burr = burr_gen(a=0.0, name='burr')
class burr12_gen(rv_continuous):
    r"""A Burr (Type XII) continuous random variable.
    %(before_notes)s
    See Also
    --------
    fisk : a special case of either `burr` or `burr12` with ``d=1``
    burr : Burr Type III distribution
    Notes
    -----
    The probability density function for `burr12` is:
    .. math::
        f(x, c, d) = c d x^{c-1} / (1 + x^c)^{d + 1}
    for :math:`x >= 0` and :math:`c, d > 0`.
    `burr12` takes ``c`` and ``d`` as shape parameters for :math:`c`
    and :math:`d`.
    This is the PDF corresponding to the twelfth CDF given in Burr's list;
    specifically, it is equation (20) in Burr's paper [1]_.
    %(after_notes)s
    The Burr type 12 distribution is also sometimes referred to as
    the Singh-Maddala distribution from NIST [2]_.
    References
    ----------
    .. [1] Burr, I. W. "Cumulative frequency functions", Annals of
       Mathematical Statistics, 13(2), pp 215-232 (1942).
    .. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/b12pdf.htm
    .. [3] "Burr distribution",
       https://en.wikipedia.org/wiki/Burr_distribution
    %(example)s
    """

    def _pdf(self, x, c, d):
        # burr12.pdf(x, c, d) = c * d * x**(c-1) * (1+x**(c))**(-d-1)
        return np.exp(self._logpdf(x, c, d))

    def _logpdf(self, x, c, d):
        return np.log(c) + np.log(d) + sc.xlogy(c - 1, x) + sc.xlog1py(-d-1, x**c)

    def _cdf(self, x, c, d):
        # F(x) = 1 - sf(x), via expm1 of the log-sf for accuracy.
        return -sc.expm1(self._logsf(x, c, d))

    def _logcdf(self, x, c, d):
        return sc.log1p(-(1 + x**c)**(-d))

    def _sf(self, x, c, d):
        return np.exp(self._logsf(x, c, d))

    def _logsf(self, x, c, d):
        return sc.xlog1py(-d, x**c)

    def _ppf(self, q, c, d):
        # The following is an implementation of
        #   ((1 - q)**(-1.0/d) - 1)**(1.0/c)
        # that does a better job handling small values of q.
        return sc.expm1(-1/d * sc.log1p(-q))**(1/c)

    def _munp(self, n, c, d):
        # E[X**n] = d * B(1 + n/c, d - n/c)
        nc = 1. * n / c
        return d * sc.beta(1.0 + nc, d - nc)


burr12 = burr12_gen(a=0.0, name='burr12')
class fisk_gen(burr_gen):
    r"""A Fisk continuous random variable.
    The Fisk distribution is also known as the log-logistic distribution.
    %(before_notes)s
    See Also
    --------
    burr
    Notes
    -----
    The probability density function for `fisk` is:
    .. math::
        f(x, c) = c x^{-c-1} (1 + x^{-c})^{-2}
    for :math:`x >= 0` and :math:`c > 0`.
    `fisk` takes ``c`` as a shape parameter for :math:`c`.
    `fisk` is a special case of `burr` or `burr12` with ``d=1``.
    %(after_notes)s
    %(example)s
    """
    # Every method delegates to the Burr III implementation with the
    # second shape parameter pinned at d = 1.

    def _pdf(self, x, c):
        # fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
        return burr._pdf(x, c, 1.0)

    def _cdf(self, x, c):
        return burr._cdf(x, c, 1.0)

    def _sf(self, x, c):
        return burr._sf(x, c, 1.0)

    def _logpdf(self, x, c):
        return burr._logpdf(x, c, 1.0)

    def _logcdf(self, x, c):
        return burr._logcdf(x, c, 1.0)

    def _logsf(self, x, c):
        return burr._logsf(x, c, 1.0)

    def _ppf(self, x, c):
        return burr._ppf(x, c, 1.0)

    def _munp(self, n, c):
        return burr._munp(n, c, 1.0)

    def _stats(self, c):
        return burr._stats(c, 1.0)

    def _entropy(self, c):
        # Closed form specific to d = 1.
        return 2 - np.log(c)


fisk = fisk_gen(a=0.0, name='fisk')
class cauchy_gen(rv_continuous):
    r"""A Cauchy continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `cauchy` is
    .. math::
        f(x) = \frac{1}{\pi (1 + x^2)}
    for a real number :math:`x`.
    %(after_notes)s
    %(example)s
    """

    def _pdf(self, x):
        # cauchy.pdf(x) = 1 / (pi * (1 + x**2))
        return 1.0/np.pi/(1.0+x*x)

    def _cdf(self, x):
        return 0.5 + 1.0/np.pi*np.arctan(x)

    def _ppf(self, q):
        return np.tan(np.pi*q-np.pi/2.0)

    def _sf(self, x):
        return 0.5 - 1.0/np.pi*np.arctan(x)

    def _isf(self, q):
        return np.tan(np.pi/2.0-np.pi*q)

    def _stats(self):
        # No moments of any order exist for the Cauchy distribution.
        return np.nan, np.nan, np.nan, np.nan

    def _entropy(self):
        return np.log(4*np.pi)

    def _fitstart(self, data, args=None):
        # Initialize ML guesses using quartiles instead of moments:
        # sample moments do not converge for Cauchy data.
        p25, p50, p75 = np.percentile(data, [25, 50, 75])
        return p50, (p75 - p25)/2


cauchy = cauchy_gen(name='cauchy')
class chi_gen(rv_continuous):
    r"""A chi continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `chi` is:
    .. math::
        f(x, k) = \frac{1}{2^{k/2-1} \Gamma \left( k/2 \right)}
                   x^{k-1} \exp \left( -x^2/2 \right)
    for :math:`x >= 0` and :math:`k > 0` (degrees of freedom, denoted ``df``
    in the implementation). :math:`\Gamma` is the gamma function
    (`scipy.special.gamma`).
    Special cases of `chi` are:
    - ``chi(1, loc, scale)`` is equivalent to `halfnorm`
    - ``chi(2, 0, scale)`` is equivalent to `rayleigh`
    - ``chi(3, 0, scale)`` is equivalent to `maxwell`
    `chi` takes ``df`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """

    def _rvs(self, df, size=None, random_state=None):
        # A chi variate is the square root of a chi-squared variate.
        return np.sqrt(chi2.rvs(df, size=size, random_state=random_state))

    def _pdf(self, x, df):
        # Evaluate through the log-density for numerical robustness.
        return np.exp(self._logpdf(x, df))

    def _logpdf(self, x, df):
        # log of the normalizing constant: (1 - df/2)*log(2) - gammaln(df/2)
        log_norm = np.log(2) - .5*np.log(2)*df - sc.gammaln(.5*df)
        return log_norm + sc.xlogy(df - 1., x) - .5*x**2

    def _cdf(self, x, df):
        # P(X <= x) = P(chi-squared <= x**2), the regularized lower
        # incomplete gamma function.
        return sc.gammainc(.5*df, .5*x**2)

    def _sf(self, x, df):
        return sc.gammaincc(.5*df, .5*x**2)

    def _ppf(self, q, df):
        # Invert the incomplete gamma, then undo the x**2/2 substitution.
        return np.sqrt(2*sc.gammaincinv(.5*df, q))

    def _isf(self, q, df):
        return np.sqrt(2*sc.gammainccinv(.5*df, q))

    def _stats(self, df):
        # Mean: sqrt(2)*Gamma((df+1)/2)/Gamma(df/2), computed through
        # gammaln to stay finite for large df.
        mean = np.sqrt(2)*np.exp(sc.gammaln(df/2.0+0.5)-sc.gammaln(df/2.0))
        var = df - mean*mean
        skew = (2*mean**3.0 + mean*(1-2*df))/np.asarray(np.power(var, 1.5))
        kurt = 2*df*(1.0-df)-6*mean**4 + 4*mean**2 * (2*df-1)
        kurt /= np.asarray(var**2.0)
        return mean, var, skew, kurt


chi = chi_gen(a=0.0, name='chi')
class chi2_gen(rv_continuous):
    r"""A chi-squared continuous random variable.
    For the noncentral chi-square distribution, see `ncx2`.
    %(before_notes)s
    See Also
    --------
    ncx2
    Notes
    -----
    The probability density function for `chi2` is:
    .. math::
        f(x, k) = \frac{1}{2^{k/2} \Gamma \left( k/2 \right)}
                   x^{k/2-1} \exp \left( -x/2 \right)
    for :math:`x > 0` and :math:`k > 0` (degrees of freedom, denoted ``df``
    in the implementation).
    `chi2` takes ``df`` as a shape parameter.
    The chi-squared distribution is a special case of the gamma
    distribution, with gamma parameters ``a = df/2``, ``loc = 0`` and
    ``scale = 2``.
    %(after_notes)s
    %(example)s
    """

    def _rvs(self, df, size=None, random_state=None):
        # Delegate to the generator's native chi-square sampler.
        return random_state.chisquare(df, size)

    def _pdf(self, x, df):
        # chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
        return np.exp(self._logpdf(x, df))

    def _logpdf(self, x, df):
        half = df/2.
        return sc.xlogy(half-1, x) - x/2. - sc.gammaln(half) - (np.log(2)*df)/2.

    def _cdf(self, x, df):
        # chdtr is the chi-square CDF from scipy.special.
        return sc.chdtr(df, x)

    def _sf(self, x, df):
        return sc.chdtrc(df, x)

    def _isf(self, p, df):
        return sc.chdtri(df, p)

    def _ppf(self, p, df):
        return 2*sc.gammaincinv(df/2, p)

    def _stats(self, df):
        # mean df, variance 2*df, skewness sqrt(8/df),
        # excess kurtosis 12/df.
        return df, 2*df, 2*np.sqrt(2.0/df), 12.0/df


chi2 = chi2_gen(a=0.0, name='chi2')
class cosine_gen(rv_continuous):
    r"""A cosine continuous random variable.
    %(before_notes)s
    Notes
    -----
    The cosine distribution is an approximation to the normal distribution.
    The probability density function for `cosine` is:
    .. math::
        f(x) = \frac{1}{2\pi} (1+\cos(x))
    for :math:`-\pi \le x \le \pi`.
    %(after_notes)s
    %(example)s
    """

    def _pdf(self, x):
        # cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
        return 1.0/2/np.pi*(1+np.cos(x))

    def _logpdf(self, x):
        # log1p(cos(x)) would warn/return -inf at the endpoints where
        # cos(x) == -1; handle that branch explicitly.
        c = np.cos(x)
        return _lazywhere(c != -1, (c,),
                          lambda c: np.log1p(c) - np.log(2*np.pi),
                          fillvalue=-np.inf)

    def _cdf(self, x):
        # Delegate to the specialized C implementation for accuracy.
        return scu._cosine_cdf(x)

    def _sf(self, x):
        # Survival function by symmetry about 0: sf(x) = cdf(-x).
        return scu._cosine_cdf(-x)

    def _ppf(self, p):
        return scu._cosine_invcdf(p)

    def _isf(self, p):
        # Inverse survival function by the same symmetry.
        return -scu._cosine_invcdf(p)

    def _stats(self):
        # mean 0, variance pi**2/3 - 2, skew 0, closed-form excess kurtosis.
        return 0.0, np.pi*np.pi/3.0-2.0, 0.0, -6.0*(np.pi**4-90)/(5.0*(np.pi*np.pi-6)**2)

    def _entropy(self):
        return np.log(4*np.pi)-1.0


cosine = cosine_gen(a=-np.pi, b=np.pi, name='cosine')
class dgamma_gen(rv_continuous):
    r"""A double gamma continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `dgamma` is:
    .. math::
        f(x, a) = \frac{1}{2\Gamma(a)} |x|^{a-1} \exp(-|x|)
    for a real number :math:`x` and :math:`a > 0`. :math:`\Gamma` is the
    gamma function (`scipy.special.gamma`).
    `dgamma` takes ``a`` as a shape parameter for :math:`a`.
    %(after_notes)s
    %(example)s
    """

    def _rvs(self, a, size=None, random_state=None):
        # Sample |X| from a gamma law, then attach a random sign.
        u = random_state.uniform(size=size)
        gm = gamma.rvs(a, size=size, random_state=random_state)
        return gm * np.where(u >= 0.5, 1, -1)

    def _pdf(self, x, a):
        # dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
        ax = abs(x)
        return 1.0/(2*sc.gamma(a))*ax**(a-1.0) * np.exp(-ax)

    def _logpdf(self, x, a):
        ax = abs(x)
        return sc.xlogy(a - 1.0, ax) - ax - np.log(2) - sc.gammaln(a)

    def _cdf(self, x, a):
        # Symmetric about 0: fold the one-sided gamma CDF.
        fac = 0.5*sc.gammainc(a, abs(x))
        return np.where(x > 0, 0.5 + fac, 0.5 - fac)

    def _sf(self, x, a):
        fac = 0.5*sc.gammainc(a, abs(x))
        return np.where(x > 0, 0.5-fac, 0.5+fac)

    def _ppf(self, q, a):
        # Invert the folded CDF; the sign follows which half q lies in.
        fac = sc.gammainccinv(a, 1-abs(2*q-1))
        return np.where(q > 0.5, fac, -fac)

    def _stats(self, a):
        # Odd moments vanish by symmetry.
        mu2 = a*(a+1.0)
        return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0


dgamma = dgamma_gen(name='dgamma')
class dweibull_gen(rv_continuous):
    r"""A double Weibull continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `dweibull` is given by
    .. math::
        f(x, c) = c / 2 |x|^{c-1} \exp(-|x|^c)
    for a real number :math:`x` and :math:`c > 0`.
    `dweibull` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    %(example)s
    """

    def _rvs(self, c, size=None, random_state=None):
        # Sample |X| from weibull_min, then attach a random sign.
        u = random_state.uniform(size=size)
        w = weibull_min.rvs(c, size=size, random_state=random_state)
        return w * (np.where(u >= 0.5, 1, -1))

    def _pdf(self, x, c):
        # dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
        ax = abs(x)
        Px = c / 2.0 * ax**(c-1.0) * np.exp(-ax**c)
        return Px

    def _logpdf(self, x, c):
        ax = abs(x)
        return np.log(c) - np.log(2.0) + sc.xlogy(c - 1.0, ax) - ax**c

    def _cdf(self, x, c):
        # Fold the one-sided Weibull CDF symmetrically about 0.
        Cx1 = 0.5 * np.exp(-abs(x)**c)
        return np.where(x > 0, 1 - Cx1, Cx1)

    def _ppf(self, q, c):
        fac = 2. * np.where(q <= 0.5, q, 1. - q)
        fac = np.power(-np.log(fac), 1.0 / c)
        return np.where(q > 0.5, fac, -fac)

    def _munp(self, n, c):
        # Odd raw moments vanish by symmetry; even ones match weibull_min.
        return (1 - (n % 2)) * sc.gamma(1.0 + 1.0 * n / c)

    # since we know that all odd moments are zeros, return them at once.
    # returning Nones from _stats makes the public stats call _munp
    # so overall we're saving one or two gamma function evaluations here.
    def _stats(self, c):
        return 0, None, 0, None


dweibull = dweibull_gen(name='dweibull')
class expon_gen(rv_continuous):
    r"""An exponential continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `expon` is:
    .. math::
        f(x) = \exp(-x)
    for :math:`x \ge 0`.
    %(after_notes)s
    A common parameterization for `expon` is in terms of the rate parameter
    ``lambda``, such that ``pdf = lambda * exp(-lambda * x)``. This
    parameterization corresponds to using ``scale = 1 / lambda``.
    The exponential distribution is a special case of the gamma
    distributions, with gamma shape parameter ``a = 1``.
    %(example)s
    """

    def _rvs(self, size=None, random_state=None):
        return random_state.standard_exponential(size)

    def _pdf(self, x):
        # expon.pdf(x) = exp(-x)
        return np.exp(-x)

    def _logpdf(self, x):
        return -x

    def _cdf(self, x):
        # 1 - exp(-x), via expm1 for accuracy at small x.
        return -sc.expm1(-x)

    def _ppf(self, q):
        return -sc.log1p(-q)

    def _sf(self, x):
        return np.exp(-x)

    def _logsf(self, x):
        return -x

    def _isf(self, q):
        return -np.log(q)

    def _stats(self):
        # mean, variance, skewness, excess kurtosis of the standard
        # exponential law.
        return 1.0, 1.0, 2.0, 6.0

    def _entropy(self):
        return 1.0

    @_call_super_mom
    @replace_notes_in_docstring(rv_continuous, notes="""\
        When `method='MLE'`,
        this function uses explicit formulas for the maximum likelihood
        estimation of the exponential distribution parameters, so the
        `optimizer`, `loc` and `scale` keyword arguments are
        ignored.\n\n""")
    def fit(self, data, *args, **kwds):
        # Closed-form MLE: loc is the sample minimum, scale the shifted
        # sample mean. No shape parameters, so extra args are an error.
        if len(args) > 0:
            raise TypeError("Too many arguments.")
        floc = kwds.pop('floc', None)
        fscale = kwds.pop('fscale', None)
        _remove_optimizer_parameters(kwds)
        if floc is not None and fscale is not None:
            # This check is for consistency with `rv_continuous.fit`.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")
        data = np.asarray(data)
        if not np.isfinite(data).all():
            raise RuntimeError("The data contains non-finite values.")
        data_min = data.min()
        if floc is None:
            # ML estimate of the location is the minimum of the data.
            loc = data_min
        else:
            loc = floc
            if data_min < loc:
                # There are values that are less than the specified loc.
                raise FitDataError("expon", lower=floc, upper=np.inf)
        if fscale is None:
            # ML estimate of the scale is the shifted mean.
            scale = data.mean() - loc
        else:
            scale = fscale
        # We expect the return values to be floating point, so ensure it
        # by explicitly converting to float.
        return float(loc), float(scale)


expon = expon_gen(a=0.0, name='expon')
class exponnorm_gen(rv_continuous):
    r"""An exponentially modified Normal continuous random variable.
    Also known as the exponentially modified Gaussian distribution [1]_.
    %(before_notes)s
    Notes
    -----
    The probability density function for `exponnorm` is:
    .. math::
        f(x, K) = \frac{1}{2K} \exp\left(\frac{1}{2 K^2} - x / K \right)
                  \text{erfc}\left(-\frac{x - 1/K}{\sqrt{2}}\right)
    where :math:`x` is a real number and :math:`K > 0`.
    It can be thought of as the sum of a standard normal random variable
    and an independent exponentially distributed random variable with rate
    ``1/K``.
    %(after_notes)s
    An alternative parameterization of this distribution (for example, in
    the Wikipedia article [1]_) involves three parameters, :math:`\mu`,
    :math:`\lambda` and :math:`\sigma`.
    In the present parameterization this corresponds to having ``loc`` and
    ``scale`` equal to :math:`\mu` and :math:`\sigma`, respectively, and
    shape parameter :math:`K = 1/(\sigma\lambda)`.
    .. versionadded:: 0.16.0
    References
    ----------
    .. [1] Exponentially modified Gaussian distribution, Wikipedia,
           https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
    %(example)s
    """

    def _rvs(self, K, size=None, random_state=None):
        # Sum of an exponential variate with mean K and a standard normal.
        expval = random_state.standard_exponential(size) * K
        gval = random_state.standard_normal(size)
        return expval + gval

    def _pdf(self, x, K):
        return np.exp(self._logpdf(x, K))

    def _logpdf(self, x, K):
        invK = 1.0 / K
        exparg = invK * (0.5 * invK - x)
        # exp(exparg)*erfc(...) rewritten through the normal log-CDF to
        # avoid overflow/underflow in the tails.
        return exparg + _norm_logcdf(x - invK) - np.log(K)

    def _cdf(self, x, K):
        invK = 1.0 / K
        expval = invK * (0.5 * invK - x)
        # Keep the product in log space to sidestep intermediate overflow.
        logprod = expval + _norm_logcdf(x - invK)
        return _norm_cdf(x) - np.exp(logprod)

    def _sf(self, x, K):
        invK = 1.0 / K
        expval = invK * (0.5 * invK - x)
        logprod = expval + _norm_logcdf(x - invK)
        return _norm_cdf(-x) + np.exp(logprod)

    def _stats(self, K):
        # mean K, variance 1 + K**2, closed-form skewness and kurtosis.
        K2 = K * K
        opK2 = 1.0 + K2
        skw = 2 * K**3 * opK2**(-1.5)
        krt = 6.0 * K2 * K2 * opK2**(-2)
        return K, opK2, skw, krt


exponnorm = exponnorm_gen(name='exponnorm')
class exponweib_gen(rv_continuous):
    r"""An exponentiated Weibull continuous random variable.
    %(before_notes)s
    See Also
    --------
    weibull_min, numpy.random.Generator.weibull
    Notes
    -----
    The probability density function for `exponweib` is:
    .. math::
        f(x, a, c) = a c [1-\exp(-x^c)]^{a-1} \exp(-x^c) x^{c-1}
    and its cumulative distribution function is:
    .. math::
        F(x, a, c) = [1-\exp(-x^c)]^a
    for :math:`x > 0`, :math:`a > 0`, :math:`c > 0`.
    `exponweib` takes :math:`a` and :math:`c` as shape parameters:
    * :math:`a` is the exponentiation parameter,
      with the special case :math:`a=1` corresponding to the
      (non-exponentiated) Weibull distribution `weibull_min`.
    * :math:`c` is the shape parameter of the non-exponentiated Weibull law.
    %(after_notes)s
    References
    ----------
    https://en.wikipedia.org/wiki/Exponentiated_Weibull_distribution
    %(example)s
    """

    def _pdf(self, x, a, c):
        # exponweib.pdf(x, a, c) =
        #     a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
        return np.exp(self._logpdf(x, a, c))

    def _logpdf(self, x, a, c):
        # expm1 keeps 1 - exp(-x**c) accurate for small x**c.
        negxc = -x**c
        exm1c = -sc.expm1(negxc)
        logp = (np.log(a) + np.log(c) + sc.xlogy(a - 1.0, exm1c) +
                negxc + sc.xlogy(c - 1.0, x))
        return logp

    def _cdf(self, x, a, c):
        # F(x) = (1 - exp(-x**c))**a
        exm1c = -sc.expm1(-x**c)
        return exm1c**a

    def _ppf(self, q, a, c):
        # Direct inversion of the closed-form CDF.
        return (-sc.log1p(-q**(1.0/a)))**np.asarray(1.0/c)


exponweib = exponweib_gen(a=0.0, name='exponweib')
class exponpow_gen(rv_continuous):
    r"""An exponential power continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `exponpow` is:
    .. math::
        f(x, b) = b x^{b-1} \exp(1 + x^b - \exp(x^b))
    for :math:`x \ge 0`, :math:`b > 0`. Note that this is a different
    distribution from the exponential power distribution that is also known
    under the names "generalized normal" or "generalized Gaussian".
    `exponpow` takes ``b`` as a shape parameter for :math:`b`.
    %(after_notes)s
    References
    ----------
    http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf
    %(example)s
    """

    def _pdf(self, x, b):
        # exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))
        return np.exp(self._logpdf(x, b))

    def _logpdf(self, x, b):
        xb = x**b
        f = 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb)
        return f

    def _cdf(self, x, b):
        # F(x) = 1 - exp(1 - exp(x**b)), written with expm1 for accuracy.
        return -sc.expm1(-sc.expm1(x**b))

    def _sf(self, x, b):
        return np.exp(-sc.expm1(x**b))

    def _isf(self, x, b):
        return (sc.log1p(-np.log(x)))**(1./b)

    def _ppf(self, q, b):
        return pow(sc.log1p(-sc.log1p(-q)), 1.0/b)


exponpow = exponpow_gen(a=0.0, name='exponpow')
class fatiguelife_gen(rv_continuous):
    r"""A fatigue-life (Birnbaum-Saunders) continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `fatiguelife` is:
    .. math::
        f(x, c) = \frac{x+1}{2c\sqrt{2\pi x^3}} \exp(-\frac{(x-1)^2}{2x c^2})
    for :math:`x >= 0` and :math:`c > 0`.
    `fatiguelife` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    References
    ----------
    .. [1] "Birnbaum-Saunders distribution",
           https://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution
    %(example)s
    """
    # The pdf is singular at x = 0, so exclude the left endpoint.
    _support_mask = rv_continuous._open_support_mask

    def _rvs(self, c, size=None, random_state=None):
        # Birnbaum-Saunders representation: with Z standard normal and
        # x = c*Z/2, a variate is 1 + 2*x**2 + 2*x*sqrt(1 + x**2).
        z = random_state.standard_normal(size)
        x = 0.5*c*z
        x2 = x*x
        t = 1.0 + 2*x2 + 2*x*np.sqrt(1 + x2)
        return t

    def _pdf(self, x, c):
        # fatiguelife.pdf(x, c) =
        #     (x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
        return np.exp(self._logpdf(x, c))

    def _logpdf(self, x, c):
        return (np.log(x+1) - (x-1)**2 / (2.0*x*c**2) - np.log(2*c) -
                0.5*(np.log(2*np.pi) + 3*np.log(x)))

    def _cdf(self, x, c):
        # F(x) = Phi((sqrt(x) - 1/sqrt(x)) / c)
        return _norm_cdf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))

    def _ppf(self, q, c):
        # Invert the normal transform via the quadratic formula.
        tmp = c*sc.ndtri(q)
        return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2

    def _sf(self, x, c):
        return _norm_sf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))

    def _isf(self, q, c):
        tmp = -c*sc.ndtri(q)
        return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2

    def _stats(self, c):
        # NB: the formula for kurtosis in wikipedia seems to have an error:
        # it's 40, not 41. At least it disagrees with the one from Wolfram
        # Alpha. And the latter one, below, passes the tests, while the wiki
        # one doesn't. So far I didn't have the guts to actually check the
        # coefficients from the expressions for the raw moments.
        c2 = c*c
        mu = c2 / 2.0 + 1.0
        den = 5.0 * c2 + 4.0
        mu2 = c2*den / 4.0
        g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5)
        g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0
        return mu, mu2, g1, g2


fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
class foldcauchy_gen(rv_continuous):
    r"""A folded Cauchy continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `foldcauchy` is:
    .. math::
        f(x, c) = \frac{1}{\pi (1+(x-c)^2)} + \frac{1}{\pi (1+(x+c)^2)}
    for :math:`x \ge 0`.
    `foldcauchy` takes ``c`` as a shape parameter for :math:`c`.
    %(example)s
    """

    def _rvs(self, c, size=None, random_state=None):
        # |X| where X is Cauchy shifted by c.
        return abs(cauchy.rvs(loc=c, size=size,
                              random_state=random_state))

    def _pdf(self, x, c):
        # Density of the two Cauchy branches folded onto x >= 0:
        # 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
        near_lobe = 1.0/(1+(x-c)**2)
        far_lobe = 1.0/(1+(x+c)**2)
        return 1.0/np.pi*(near_lobe + far_lobe)

    def _cdf(self, x, c):
        return 1.0/np.pi*(np.arctan(x-c) + np.arctan(x+c))

    def _stats(self, c):
        # No finite moments exist for the (folded) Cauchy law.
        return np.inf, np.inf, np.nan, np.nan


foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
class f_gen(rv_continuous):
    r"""An F continuous random variable.
    For the noncentral F distribution, see `ncf`.
    %(before_notes)s
    See Also
    --------
    ncf
    Notes
    -----
    The probability density function for `f` is:
    .. math::
        f(x, df_1, df_2) = \frac{df_2^{df_2/2} df_1^{df_1/2} x^{df_1 / 2-1}}
                                {(df_2+df_1 x)^{(df_1+df_2)/2}
                                 B(df_1/2, df_2/2)}
    for :math:`x > 0`.
    `f` takes ``dfn`` and ``dfd`` as shape parameters.
    %(after_notes)s
    %(example)s
    """

    def _rvs(self, dfn, dfd, size=None, random_state=None):
        return random_state.f(dfn, dfd, size)

    def _pdf(self, x, dfn, dfd):
        #                      df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
        # F.pdf(x, df1, df2) = --------------------------------------------
        #                      (df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
        return np.exp(self._logpdf(x, dfn, dfd))

    def _logpdf(self, x, dfn, dfd):
        # Force float arithmetic on possibly-integer degrees of freedom.
        n = 1.0 * dfn
        m = 1.0 * dfd
        lPx = (m/2 * np.log(m) + n/2 * np.log(n) + sc.xlogy(n/2 - 1, x)
               - (((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2)))
        return lPx

    def _cdf(self, x, dfn, dfd):
        return sc.fdtr(dfn, dfd, x)

    def _sf(self, x, dfn, dfd):
        return sc.fdtrc(dfn, dfd, x)

    def _ppf(self, q, dfn, dfd):
        return sc.fdtri(dfn, dfd, q)

    def _stats(self, dfn, dfd):
        # Each moment exists only for sufficiently large dfd:
        # mean needs dfd > 2, variance > 4, skewness > 6, kurtosis > 8.
        v1, v2 = 1. * dfn, 1. * dfd
        v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8.
        mu = _lazywhere(
            v2 > 2, (v2, v2_2),
            lambda v2, v2_2: v2 / v2_2,
            np.inf)
        mu2 = _lazywhere(
            v2 > 4, (v1, v2, v2_2, v2_4),
            lambda v1, v2, v2_2, v2_4:
            2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4),
            np.inf)
        g1 = _lazywhere(
            v2 > 6, (v1, v2_2, v2_4, v2_6),
            lambda v1, v2_2, v2_4, v2_6:
            (2 * v1 + v2_2) / v2_6 * np.sqrt(v2_4 / (v1 * (v1 + v2_2))),
            np.nan)
        g1 *= np.sqrt(8.)
        g2 = _lazywhere(
            v2 > 8, (g1, v2_6, v2_8),
            lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8,
            np.nan)
        g2 *= 3. / 2.
        return mu, mu2, g1, g2


f = f_gen(a=0.0, name='f')
## Folded Normal
##   abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: regress docs have scale parameter correct, but first parameter
## he gives is a shape parameter A = c * scale
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
    r"""A folded normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `foldnorm` is:
    .. math::
        f(x, c) = \sqrt{2/\pi} cosh(c x) \exp(-\frac{x^2+c^2}{2})
    for :math:`c \ge 0`.
    `foldnorm` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    %(example)s
    """

    def _argcheck(self, c):
        # Unlike the default shape check, c = 0 is allowed (half-normal).
        return c >= 0

    def _rvs(self, c, size=None, random_state=None):
        return abs(random_state.standard_normal(size) + c)

    def _pdf(self, x, c):
        # foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
        # written as the sum of the two reflected normal densities.
        return _norm_pdf(x + c) + _norm_pdf(x-c)

    def _cdf(self, x, c):
        return _norm_cdf(x-c) + _norm_cdf(x+c) - 1.0

    def _stats(self, c):
        # Regina C. Elandt, Technometrics 3, 551 (1961)
        # https://www.jstor.org/stable/1266561
        c2 = c*c
        expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)
        mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
        mu2 = c2 + 1 - mu*mu
        g1 = 2. * (mu*mu*mu - c2*mu - expfac)
        g1 /= np.power(mu2, 1.5)
        g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
        g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
        g2 = g2 / mu2**2.0 - 3.
        return mu, mu2, g1, g2


foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
class weibull_min_gen(rv_continuous):
    r"""Weibull minimum continuous random variable.
    The Weibull Minimum Extreme Value distribution, from extreme value theory
    (Fisher-Gnedenko theorem), is also often simply called the Weibull
    distribution. It arises as the limiting distribution of the rescaled
    minimum of iid random variables.
    %(before_notes)s
    See Also
    --------
    weibull_max, numpy.random.Generator.weibull, exponweib
    Notes
    -----
    The probability density function for `weibull_min` is:
    .. math::
        f(x, c) = c x^{c-1} \exp(-x^c)
    for :math:`x > 0`, :math:`c > 0`.
    `weibull_min` takes ``c`` as a shape parameter for :math:`c`.
    (named :math:`k` in Wikipedia article and :math:`a` in
    ``numpy.random.weibull``). Special shape values are :math:`c=1` and
    :math:`c=2` where Weibull distribution reduces to the `expon` and
    `rayleigh` distributions respectively.
    %(after_notes)s
    References
    ----------
    https://en.wikipedia.org/wiki/Weibull_distribution
    https://en.wikipedia.org/wiki/Fisher-Tippett-Gnedenko_theorem
    %(example)s
    """

    def _pdf(self, x, c):
        # weibull_min.pdf(x, c) = c * x**(c-1) * exp(-x**c)
        xc = pow(x, c)
        return c*pow(x, c-1)*np.exp(-xc)

    def _logpdf(self, x, c):
        # xlogy handles the x == 0 endpoint without warnings.
        return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c)

    def _cdf(self, x, c):
        # 1 - exp(-x**c), via expm1 for accuracy at small x.
        return -sc.expm1(-pow(x, c))

    def _sf(self, x, c):
        return np.exp(-pow(x, c))

    def _logsf(self, x, c):
        return -pow(x, c)

    def _ppf(self, q, c):
        # Invert the CDF; log1p keeps precision for q near 0.
        return pow(-sc.log1p(-q), 1.0/c)

    def _munp(self, n, c):
        # E[X**n] = Gamma(1 + n/c)
        return sc.gamma(1.0+n*1.0/c)

    def _entropy(self, c):
        # Closed-form differential entropy in terms of Euler's constant.
        return -_EULER / c - np.log(c) + _EULER + 1


weibull_min = weibull_min_gen(a=0.0, name='weibull_min')
class weibull_max_gen(rv_continuous):
    r"""Weibull maximum continuous random variable.
    The Weibull Maximum Extreme Value distribution, from extreme value theory
    (Fisher-Gnedenko theorem), is the limiting distribution of rescaled
    maximum of iid random variables. This is the distribution of -X
    if X is from the `weibull_min` function.
    %(before_notes)s
    See Also
    --------
    weibull_min
    Notes
    -----
    The probability density function for `weibull_max` is:
    .. math::
        f(x, c) = c (-x)^{c-1} \exp(-(-x)^c)
    for :math:`x < 0`, :math:`c > 0`.
    `weibull_max` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    References
    ----------
    https://en.wikipedia.org/wiki/Weibull_distribution
    https://en.wikipedia.org/wiki/Fisher-Tippett-Gnedenko_theorem
    %(example)s
    """

    def _pdf(self, x, c):
        # weibull_max.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
        return c*pow(-x, c-1)*np.exp(-pow(-x, c))

    def _logpdf(self, x, c):
        return np.log(c) + sc.xlogy(c-1, -x) - pow(-x, c)

    def _cdf(self, x, c):
        return np.exp(-pow(-x, c))

    def _logcdf(self, x, c):
        return -pow(-x, c)

    def _sf(self, x, c):
        # 1 - exp(-(-x)**c), via expm1 for accuracy.
        return -sc.expm1(-pow(-x, c))

    def _ppf(self, q, c):
        return -pow(-np.log(q), 1.0/c)

    def _munp(self, n, c):
        # Raw moments mirror weibull_min's up to sign: odd ones negate.
        val = sc.gamma(1.0+n*1.0/c)
        if int(n) % 2:
            sgn = -1
        else:
            sgn = 1
        return sgn * val

    def _entropy(self, c):
        # Same differential entropy as weibull_min (reflection invariant).
        return -_EULER / c - np.log(c) + _EULER + 1


weibull_max = weibull_max_gen(b=0.0, name='weibull_max')
class genlogistic_gen(rv_continuous):
    r"""A generalized logistic continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `genlogistic` is:
    .. math::
        f(x, c) = c \frac{\exp(-x)}
                         {(1 + \exp(-x))^{c+1}}
    for :math:`x >= 0`, :math:`c > 0`.
    `genlogistic` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    %(example)s
    """

    def _pdf(self, x, c):
        # genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
        return np.exp(self._logpdf(x, c))

    def _logpdf(self, x, c):
        # Two mathematically equivalent expressions for log(pdf(x, c)):
        #   log(pdf(x, c)) = log(c) - x - (c + 1)*log(1 + exp(-x))
        #                  = log(c) + c*x - (c + 1)*log(1 + exp(x))
        # The sign-dependent `mult` picks whichever form keeps the
        # exponential argument non-positive, avoiding overflow.
        mult = -(c - 1) * (x < 0) - 1
        absx = np.abs(x)
        return np.log(c) + mult*absx - (c+1) * sc.log1p(np.exp(-absx))

    def _cdf(self, x, c):
        Cx = (1+np.exp(-x))**(-c)
        return Cx

    def _ppf(self, q, c):
        # Direct inversion of the closed-form CDF.
        vals = -np.log(pow(q, -1.0/c)-1)
        return vals

    def _stats(self, c):
        # Mean and variance via the digamma and Hurwitz zeta functions;
        # skewness and kurtosis in closed form.
        mu = _EULER + sc.psi(c)
        mu2 = np.pi*np.pi/6.0 + sc.zeta(2, c)
        g1 = -2*sc.zeta(3, c) + 2*_ZETA3
        g1 /= np.power(mu2, 1.5)
        g2 = np.pi**4/15.0 + 6*sc.zeta(4, c)
        g2 /= mu2**2.0
        return mu, mu2, g1, g2


genlogistic = genlogistic_gen(name='genlogistic')
class genpareto_gen(rv_continuous):
    r"""A generalized Pareto continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `genpareto` is:
    .. math::
        f(x, c) = (1 + c x)^{-1 - 1/c}
    defined for :math:`x \ge 0` if :math:`c \ge 0`, and for
    :math:`0 \le x \le -1/c` if :math:`c < 0`.
    `genpareto` takes ``c`` as a shape parameter for :math:`c`.
    For :math:`c=0`, `genpareto` reduces to the exponential
    distribution, `expon`:
    .. math::
        f(x, 0) = \exp(-x)
    For :math:`c=-1`, `genpareto` is uniform on ``[0, 1]``:
    .. math::
        f(x, -1) = 1
    %(after_notes)s
    %(example)s
    """

    def _argcheck(self, c):
        # c may be any finite real, including 0 and negative values.
        return np.isfinite(c)

    def _get_support(self, c):
        c = np.asarray(c)
        # Upper endpoint is -1/c for c < 0, otherwise +inf.
        b = _lazywhere(c < 0, (c,),
                       lambda c: -1. / c,
                       np.inf)
        # NOTE(review): both np.where branches are self.a -- the call only
        # broadcasts the lower endpoint to c's shape.  Presumably a plain
        # broadcast would express the intent; confirm before simplifying.
        a = np.where(c >= 0, self.a, self.a)
        return a, b

    def _pdf(self, x, c):
        # genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
        return np.exp(self._logpdf(x, c))

    def _logpdf(self, x, c):
        # The c == 0 limit is the standard exponential log-pdf, -x;
        # (x == x) propagates NaN inputs to the fill value.
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: -sc.xlog1py(c + 1., c*x) / c,
                          -x)

    def _cdf(self, x, c):
        # Expressed through the inverse Box-Cox transform, which has the
        # correct c -> 0 limit built in.
        return -sc.inv_boxcox1p(-x, -c)

    def _sf(self, x, c):
        return sc.inv_boxcox(-x, -c)

    def _logsf(self, x, c):
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: -sc.log1p(c*x) / c,
                          -x)

    def _ppf(self, q, c):
        return -sc.boxcox1p(-q, -c)

    def _isf(self, q, c):
        return -sc.boxcox(q, -c)

    def _stats(self, c, moments='mv'):
        # Each moment exists only below a threshold of the shape xi = c:
        # mean for c < 1, variance for c < 1/2, skewness for c < 1/3,
        # kurtosis for c < 1/4.  Only requested moments are computed.
        if 'm' not in moments:
            m = None
        else:
            m = _lazywhere(c < 1, (c,),
                           lambda xi: 1/(1 - xi),
                           np.inf)
        if 'v' not in moments:
            v = None
        else:
            v = _lazywhere(c < 1/2, (c,),
                           lambda xi: 1 / (1 - xi)**2 / (1 - 2*xi),
                           np.nan)
        if 's' not in moments:
            s = None
        else:
            s = _lazywhere(c < 1/3, (c,),
                           lambda xi: 2 * (1 + xi) * np.sqrt(1 - 2*xi) /
                                      (1 - 3*xi),
                           np.nan)
        if 'k' not in moments:
            k = None
        else:
            k = _lazywhere(c < 1/4, (c,),
                           lambda xi: 3 * (1 - 2*xi) * (2*xi**2 + xi + 3) /
                                      (1 - 3*xi) / (1 - 4*xi) - 3,
                           np.nan)
        return m, v, s, k

    def _munp(self, n, c):
        def __munp(n, c):
            # Binomial-expansion formula for the raw moments, c != 0;
            # the moment is finite only when c*n < 1.
            val = 0.0
            k = np.arange(0, n + 1)
            for ki, cnk in zip(k, sc.comb(n, k)):
                val = val + cnk * (-1) ** ki / (1.0 - c * ki)
            return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf)
        # c == 0 reduces to the exponential moments, Gamma(n + 1) = n!.
        return _lazywhere(c != 0, (c,),
                          lambda c: __munp(n, c),
                          sc.gamma(n + 1))

    def _entropy(self, c):
        return 1. + c


genpareto = genpareto_gen(a=0.0, name='genpareto')
class genexpon_gen(rv_continuous):
    r"""A generalized exponential continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `genexpon` is:

    .. math::

        f(x, a, b, c) = (a + b (1 - \exp(-c x)))
                        \exp(-a x - b x + \frac{b}{c} (1-\exp(-c x)))

    for :math:`x \ge 0`, :math:`a, b, c > 0`.

    `genexpon` takes :math:`a`, :math:`b` and :math:`c` as shape parameters.

    %(after_notes)s

    References
    ----------
    H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
    Distribution", Journal of the American Statistical Association, 1993.

    N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
    Applications", Asit P. Basu.

    %(example)s

    """
    def _exponent(self, x, a, b, c):
        # Shared exponent of pdf/cdf/sf: -(a + b)*x + (b/c)*(1 - exp(-c*x)).
        # Note -expm1(-c*x) == 1 - exp(-c*x), computed without cancellation.
        return (-a-b)*x + b*(-sc.expm1(-c*x))/c

    def _pdf(self, x, a, b, c):
        # pdf(x, a, b, c) = (a + b*(1 - exp(-c*x)))
        #                   * exp(-a*x - b*x + b/c*(1 - exp(-c*x)))
        hazard = a + b*(-sc.expm1(-c*x))
        return hazard * np.exp(self._exponent(x, a, b, c))

    def _logpdf(self, x, a, b, c):
        return np.log(a + b*(-sc.expm1(-c*x))) + self._exponent(x, a, b, c)

    def _cdf(self, x, a, b, c):
        return -sc.expm1(self._exponent(x, a, b, c))

    def _sf(self, x, a, b, c):
        return np.exp(self._exponent(x, a, b, c))


genexpon = genexpon_gen(a=0.0, name='genexpon')
class genextreme_gen(rv_continuous):
    r"""A generalized extreme value continuous random variable.

    %(before_notes)s

    See Also
    --------
    gumbel_r

    Notes
    -----
    For :math:`c=0`, `genextreme` is equal to `gumbel_r`.
    The probability density function for `genextreme` is:

    .. math::

        f(x, c) = \begin{cases}
                    \exp(-\exp(-x)) \exp(-x) &\text{for } c = 0\\
                    \exp(-(1-c x)^{1/c}) (1-c x)^{1/c-1} &\text{for }
                    x \le 1/c, c > 0
                  \end{cases}

    Note that several sources and software packages use the opposite
    convention for the sign of the shape parameter :math:`c`.

    `genextreme` takes ``c`` as a shape parameter for :math:`c`.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, c):
        # Any finite c is valid (c == 0 is the Gumbel limit); reject +/-inf.
        return np.where(abs(c) == np.inf, 0, 1)

    def _get_support(self, c):
        # Support is (-inf, 1/c] for c > 0 and [1/c, inf) for c < 0.
        # Clamping |c| away from 0 with _XMIN prevents 1/c from overflowing.
        _b = np.where(c > 0, 1.0 / np.maximum(c, _XMIN), np.inf)
        _a = np.where(c < 0, 1.0 / np.minimum(c, -_XMIN), -np.inf)
        return _a, _b

    def _loglogcdf(self, x, c):
        # Returns log(-log(cdf(x, c)))
        # log1p(-c*x)/c for c != 0; the c -> 0 limit is -x (Gumbel).
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: sc.log1p(-c*x)/c, -x)

    def _pdf(self, x, c):
        # genextreme.pdf(x, c) =
        #     exp(-exp(-x))*exp(-x),                    for c==0
        #     exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1),    for x \le 1/c, c > 0
        return np.exp(self._logpdf(x, c))

    def _logpdf(self, x, c):
        # cx = c*x, with the c == 0 case mapped to 0 so the Gumbel limit
        # falls out of the shared expression below.
        cx = _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: c*x, 0.0)
        logex2 = sc.log1p(-cx)
        logpex2 = self._loglogcdf(x, c)
        pex2 = np.exp(logpex2)
        # Handle special cases
        # For c == 0, x == -inf: log(-log(cdf)) is forced to 0 (pdf -> 0).
        np.putmask(logpex2, (c == 0) & (x == -np.inf), 0.0)
        # log pdf = -exp(loglogcdf) + loglogcdf - log1p(-c*x); the support
        # endpoints (cx == 1 or cx == -inf) get -inf directly.
        logpdf = _lazywhere(~((cx == 1) | (cx == -np.inf)),
                            (pex2, logpex2, logex2),
                            lambda pex2, lpex2, lex2: -pex2 + lpex2 - lex2,
                            fillvalue=-np.inf)
        # At the upper endpoint x == 1/c with c == 1 the density equals 1.
        np.putmask(logpdf, (c == 1) & (x == 1), 0.0)
        return logpdf

    def _logcdf(self, x, c):
        return -np.exp(self._loglogcdf(x, c))

    def _cdf(self, x, c):
        return np.exp(self._logcdf(x, c))

    def _sf(self, x, c):
        # expm1 keeps precision when the cdf is close to 1.
        return -sc.expm1(self._logcdf(x, c))

    def _ppf(self, q, c):
        # Gumbel quantile first, then map through (1 - exp(-c*x))/c.
        x = -np.log(-np.log(q))
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: -sc.expm1(-c * x) / c, x)

    def _isf(self, q, c):
        # Same mapping as _ppf, with log1p(-q) for accuracy at small q.
        x = -np.log(-sc.log1p(-q))
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: -sc.expm1(-c * x) / c, x)

    def _stats(self, c):
        # Moments in terms of g(n) = Gamma(n*c + 1).  Near c == 0 the direct
        # formulas suffer catastrophic cancellation, so series limits are
        # substituted below the various eps thresholds.
        g = lambda n: sc.gamma(n*c + 1)
        g1 = g(1)
        g2 = g(2)
        g3 = g(3)
        g4 = g(4)
        g2mg12 = np.where(abs(c) < 1e-7, (c*np.pi)**2.0/6.0, g2-g1**2.0)
        gam2k = np.where(abs(c) < 1e-7, np.pi**2.0/6.0,
                         sc.expm1(sc.gammaln(2.0*c+1.0)-2*sc.gammaln(c + 1.0))/c**2.0)
        eps = 1e-14
        gamk = np.where(abs(c) < eps, -_EULER, sc.expm1(sc.gammaln(c + 1))/c)
        # Mean exists for c > -1, variance for c > -1/2.
        m = np.where(c < -1.0, np.nan, -gamk)
        v = np.where(c < -0.5, np.nan, g1**2.0*gam2k)
        # skewness (exists for c > -1/3)
        sk1 = _lazywhere(c >= -1./3,
                         (c, g1, g2, g3, g2mg12),
                         lambda c, g1, g2, g3, g2gm12:
                             np.sign(c)*(-g3 + (g2 + 2*g2mg12)*g1)/g2mg12**1.5,
                         fillvalue=np.nan)
        sk = np.where(abs(c) <= eps**0.29, 12*np.sqrt(6)*_ZETA3/np.pi**3, sk1)
        # kurtosis (exists for c > -1/4)
        ku1 = _lazywhere(c >= -1./4,
                         (g1, g2, g3, g4, g2mg12),
                         lambda g1, g2, g3, g4, g2mg12:
                             (g4 + (-4*g3 + 3*(g2 + g2mg12)*g1)*g1)/g2mg12**2,
                         fillvalue=np.nan)
        ku = np.where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0)
        return m, v, sk, ku

    def _fitstart(self, data):
        # This is better than the default shape of (1,).
        # The sign of the initial shape guess follows the sample skewness.
        g = _skew(data)
        if g < 0:
            a = 0.5
        else:
            a = -0.5
        return super()._fitstart(data, args=(a,))

    def _munp(self, n, c):
        # Raw moment: sum_k C(n, k) (-1)^k Gamma(c*k + 1) / c**n,
        # finite only when c*n > -1.
        k = np.arange(0, n+1)
        vals = 1.0/c**n * np.sum(
            sc.comb(n, k) * (-1)**k * sc.gamma(c*k + 1),
            axis=0)
        return np.where(c*n > -1, vals, np.inf)

    def _entropy(self, c):
        return _EULER*(1 - c) + 1


genextreme = genextreme_gen(name='genextreme')
def _digammainv(y):
"""Inverse of the digamma function (real positive arguments only).
This function is used in the `fit` method of `gamma_gen`.
The function uses either optimize.fsolve or optimize.newton
to solve `sc.digamma(x) - y = 0`. There is probably room for
improvement, but currently it works over a wide range of y:
>>> rng = np.random.default_rng()
>>> y = 64*rng.standard_normal(1000000)
>>> y.min(), y.max()
(-311.43592651416662, 351.77388222276869)
>>> x = [_digammainv(t) for t in y]
>>> np.abs(sc.digamma(x) - y).max()
1.1368683772161603e-13
"""
_em = 0.5772156649015328606065120
func = lambda x: sc.digamma(x) - y
if y > -0.125:
x0 = np.exp(y) + 0.5
if y < 10:
# Some experimentation shows that newton reliably converges
# must faster than fsolve in this y range. For larger y,
# newton sometimes fails to converge.
value = optimize.newton(func, x0, tol=1e-10)
return value
elif y > -3:
x0 = np.exp(y/2.332) + 0.08661
else:
x0 = 1.0 / (-y - _em)
value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
full_output=True)
if ier != 1:
raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
return value[0]
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
    r"""A gamma continuous random variable.

    %(before_notes)s

    See Also
    --------
    erlang, expon

    Notes
    -----
    The probability density function for `gamma` is:

    .. math::

        f(x, a) = \frac{x^{a-1} e^{-x}}{\Gamma(a)}

    for :math:`x \ge 0`, :math:`a > 0`. Here :math:`\Gamma(a)` refers to the
    gamma function.

    `gamma` takes ``a`` as a shape parameter for :math:`a`.

    When :math:`a` is an integer, `gamma` reduces to the Erlang
    distribution, and when :math:`a=1` to the exponential distribution.

    Gamma distributions are sometimes parameterized with two variables,
    with a probability density function of:

    .. math::

        f(x, \alpha, \beta) = \frac{\beta^\alpha x^{\alpha - 1} e^{-\beta x }}{\Gamma(\alpha)}

    Note that this parameterization is equivalent to the above, with
    ``scale = 1 / beta``.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, a, size=None, random_state=None):
        # Delegate to the generator's native standard-gamma sampler.
        return random_state.standard_gamma(a, size)

    def _pdf(self, x, a):
        # gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)
        return np.exp(self._logpdf(x, a))

    def _logpdf(self, x, a):
        # xlogy handles the x == 0 boundary (gives 0 when a == 1).
        return sc.xlogy(a-1.0, x) - x - sc.gammaln(a)

    def _cdf(self, x, a):
        # Regularized lower incomplete gamma function.
        return sc.gammainc(a, x)

    def _sf(self, x, a):
        return sc.gammaincc(a, x)

    def _ppf(self, q, a):
        return sc.gammaincinv(a, q)

    def _isf(self, q, a):
        return sc.gammainccinv(a, q)

    def _stats(self, a):
        # mean = a, var = a, skew = 2/sqrt(a), excess kurtosis = 6/a.
        return a, a, 2.0/np.sqrt(a), 6.0/a

    def _entropy(self, a):
        return sc.psi(a)*(1-a) + a + sc.gammaln(a)

    def _fitstart(self, data):
        # The skewness of the gamma distribution is `2 / np.sqrt(a)`.
        # We invert that to estimate the shape `a` using the skewness
        # of the data. The formula is regularized with 1e-8 in the
        # denominator to allow for degenerate data where the skewness
        # is close to 0.
        a = 4 / (1e-8 + _skew(data)**2)
        return super()._fitstart(data, args=(a,))

    @extend_notes_in_docstring(rv_continuous, notes="""\
        When the location is fixed by using the argument `floc`
        and `method='MLE'`, this
        function uses explicit formulas or solves a simpler numerical
        problem than the full ML optimization problem. So in that case,
        the `optimizer`, `loc` and `scale` arguments are ignored.
        \n\n""")
    def fit(self, data, *args, **kwds):
        # Specialized ML fit when loc is fixed; otherwise defer to the
        # generic optimizer in rv_continuous.fit.
        floc = kwds.get('floc', None)
        method = kwds.get('method', 'mle')

        if floc is None or method.lower() == 'mm':
            # loc is not fixed. Use the default fit method.
            return super().fit(data, *args, **kwds)

        # We already have this value, so just pop it from kwds.
        kwds.pop('floc', None)

        f0 = _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
        fscale = kwds.pop('fscale', None)
        _remove_optimizer_parameters(kwds)

        # Special case: loc is fixed.
        if f0 is not None and fscale is not None:
            # This check is for consistency with `rv_continuous.fit`.
            # Without this check, this function would just return the
            # parameters that were given.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")

        # Fixed location is handled by shifting the data.
        data = np.asarray(data)
        if not np.isfinite(data).all():
            raise RuntimeError("The data contains non-finite values.")
        if np.any(data <= floc):
            raise FitDataError("gamma", lower=floc, upper=np.inf)

        if floc != 0:
            # Don't do the subtraction in-place, because `data` might be a
            # view of the input array.
            data = data - floc

        xbar = data.mean()

        # Three cases to handle:
        # * shape and scale both free
        # * shape fixed, scale free
        # * shape free, scale fixed

        if fscale is None:
            # scale is free
            if f0 is not None:
                # shape is fixed
                a = f0
            else:
                # shape and scale are both free.
                # The MLE for the shape parameter `a` is the solution to:
                # np.log(a) - sc.digamma(a) - np.log(xbar) +
                #                             np.log(data).mean() = 0
                s = np.log(xbar) - np.log(data).mean()
                func = lambda a: np.log(a) - sc.digamma(a) - s
                # Closed-form starting estimate; brentq brackets it +/-40%.
                aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
                xa = aest*(1-0.4)
                xb = aest*(1+0.4)
                a = optimize.brentq(func, xa, xb, disp=0)

            # The MLE for the scale parameter is just the data mean
            # divided by the shape parameter.
            scale = xbar / a
        else:
            # scale is fixed, shape is free
            # The MLE for the shape parameter `a` is the solution to:
            # sc.digamma(a) - np.log(data).mean() + np.log(fscale) = 0
            c = np.log(data).mean() - np.log(fscale)
            a = _digammainv(c)
            scale = fscale

        return a, floc, scale


gamma = gamma_gen(a=0.0, name='gamma')
class erlang_gen(gamma_gen):
    """An Erlang continuous random variable.

    %(before_notes)s

    See Also
    --------
    gamma

    Notes
    -----
    The Erlang distribution is a special case of the Gamma distribution, with
    the shape parameter `a` an integer. Note that this restriction is not
    enforced by `erlang`. It will, however, generate a warning the first time
    a non-integer value is used for the shape parameter.

    Refer to `gamma` for examples.

    """
    def _argcheck(self, a):
        # Positive shape required; a non-integer shape is accepted but
        # triggers a RuntimeWarning (Erlang is classically integer-shaped).
        allint = np.all(np.floor(a) == a)
        if not allint:
            # An Erlang distribution shouldn't really have a non-integer
            # shape parameter, so warn the user.
            warnings.warn(
                'The shape parameter of the erlang distribution '
                'has been given a non-integer value %r.' % (a,),
                RuntimeWarning)
        return a > 0

    def _fitstart(self, data):
        # Override gamma_gen_fitstart so that an integer initial value is
        # used. (Also regularize the division, to avoid issues when
        # _skew(data) is 0 or close to 0.)
        a = int(4.0 / (1e-8 + _skew(data)**2))
        # Deliberately skip gamma_gen._fitstart and call rv_continuous's.
        return super(gamma_gen, self)._fitstart(data, args=(a,))

    # Trivial override of the fit method, so we can monkey-patch its
    # docstring.
    def fit(self, data, *args, **kwds):
        return super().fit(data, *args, **kwds)

    # Append Erlang-specific notes to the inherited fit docstring
    # (guarded: __doc__ is None under `python -OO`).
    if fit.__doc__:
        fit.__doc__ = (rv_continuous.fit.__doc__ +
            """
            Notes
            -----
            The Erlang distribution is generally defined to have integer values
            for the shape parameter. This is not enforced by the `erlang` class.
            When fitting the distribution, it will generally return a non-integer
            value for the shape parameter. By using the keyword argument
            `f0=<integer>`, the fit method can be constrained to fit the data to
            a specific integer shape parameter.
            """)


erlang = erlang_gen(a=0.0, name='erlang')
class gengamma_gen(rv_continuous):
    r"""A generalized gamma continuous random variable.

    %(before_notes)s

    See Also
    --------
    gamma, invgamma, weibull_min

    Notes
    -----
    The probability density function for `gengamma` is ([1]_):

    .. math::

        f(x, a, c) = \frac{|c| x^{c a-1} \exp(-x^c)}{\Gamma(a)}

    for :math:`x \ge 0`, :math:`a > 0`, and :math:`c \ne 0`.
    :math:`\Gamma` is the gamma function (`scipy.special.gamma`).

    `gengamma` takes :math:`a` and :math:`c` as shape parameters.

    %(after_notes)s

    References
    ----------
    .. [1] E.W. Stacy, "A Generalization of the Gamma Distribution",
       Annals of Mathematical Statistics, Vol 33(3), pp. 1187--1192.

    %(example)s

    """
    def _argcheck(self, a, c):
        return (a > 0) & (c != 0)

    def _pdf(self, x, a, c):
        return np.exp(self._logpdf(x, a, c))

    def _logpdf(self, x, a, c):
        # log pdf = log|c| + (c*a - 1)*log(x) - x**c - lgamma(a); the corner
        # x == 0 with c < 0 is sent to -inf via the fillvalue.
        return _lazywhere((x != 0) | (c > 0), (x, c),
                          lambda x, c: (np.log(abs(c)) + sc.xlogy(c*a - 1, x)
                                        - x**c - sc.gammaln(a)),
                          fillvalue=-np.inf)

    def _cdf(self, x, a, c):
        # For c > 0 the cdf is the regularized lower incomplete gamma of
        # x**c; for c < 0 the roles of lower/upper are swapped.
        w = x**c
        lower = sc.gammainc(a, w)
        upper = sc.gammaincc(a, w)
        return np.where(c > 0, lower, upper)

    def _rvs(self, a, c, size=None, random_state=None):
        # X = G**(1/c) with G ~ Gamma(a).
        return random_state.standard_gamma(a, size=size)**(1./c)

    def _sf(self, x, a, c):
        w = x**c
        lower = sc.gammainc(a, w)
        upper = sc.gammaincc(a, w)
        return np.where(c > 0, upper, lower)

    def _ppf(self, q, a, c):
        lower = sc.gammaincinv(a, q)
        upper = sc.gammainccinv(a, q)
        return np.where(c > 0, lower, upper)**(1.0/c)

    def _isf(self, q, a, c):
        lower = sc.gammaincinv(a, q)
        upper = sc.gammainccinv(a, q)
        return np.where(c > 0, upper, lower)**(1.0/c)

    def _munp(self, n, a, c):
        # Pochhammer symbol: sc.poch(a, n) = gamma(a + n) / gamma(a)
        return sc.poch(a, n*1.0/c)

    def _entropy(self, a, c):
        val = sc.psi(a)
        return a*(1-val) + 1.0/c*val + sc.gammaln(a) - np.log(abs(c))


gengamma = gengamma_gen(a=0.0, name='gengamma')
class genhalflogistic_gen(rv_continuous):
    r"""A generalized half-logistic continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `genhalflogistic` is:

    .. math::

        f(x, c) = \frac{2 (1 - c x)^{1/(c-1)}}{[1 + (1 - c x)^{1/c}]^2}

    for :math:`0 \le x \le 1/c`, and :math:`c > 0`.

    `genhalflogistic` takes ``c`` as a shape parameter for :math:`c`.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, c):
        return c > 0

    def _get_support(self, c):
        # Support is [0, 1/c].
        return self.a, 1.0/c

    def _pdf(self, x, c):
        # pdf(x, c) = 2*(1 - c*x)**(1/c - 1) / (1 + (1 - c*x)**(1/c))**2
        inv_c = 1.0/c
        base = np.asarray(1-c*x)
        base_pow_m1 = base**(inv_c-1)      # (1 - c*x)**(1/c - 1)
        base_pow = base_pow_m1*base        # (1 - c*x)**(1/c)
        return 2*base_pow_m1 / (1+base_pow)**2

    def _cdf(self, x, c):
        inv_c = 1.0/c
        base = np.asarray(1-c*x)
        base_pow = base**(inv_c)
        return (1.0-base_pow) / (1+base_pow)

    def _ppf(self, q, c):
        # Invert the cdf: x = (1 - ((1-q)/(1+q))**c) / c.
        return 1.0/c*(1-((1.0-q)/(1.0+q))**c)

    def _entropy(self, c):
        return 2 - (2*c+1)*np.log(2)


genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
class genhyperbolic_gen(rv_continuous):
    r"""A generalized hyperbolic continuous random variable.

    %(before_notes)s

    See Also
    --------
    t, norminvgauss, geninvgauss, laplace, cauchy

    Notes
    -----
    The probability density function for `genhyperbolic` is:

    .. math::

        f(x, p, a, b) =
            \frac{(a^2 - b^2)^{p/2}}
            {\sqrt{2\pi}a^{p-0.5}
            K_p\Big(\sqrt{a^2 - b^2}\Big)}
            e^{bx} \times \frac{K_{p - 1/2}
            (a \sqrt{1 + x^2})}
            {(\sqrt{1 + x^2})^{1/2 - p}}

    for :math:`x, p \in ( - \infty; \infty)`,
    :math:`|b| < a` if :math:`p \ge 0`,
    :math:`|b| \le a` if :math:`p < 0`.
    :math:`K_{p}(.)` denotes the modified Bessel function of the second
    kind and order :math:`p` (`scipy.special.kn`)

    `genhyperbolic` takes ``p`` as a tail parameter,
    ``a`` as a shape parameter,
    ``b`` as a skewness parameter.

    %(after_notes)s

    The original parameterization of the Generalized Hyperbolic Distribution
    is found in [1]_ as follows

    .. math::

        f(x, \lambda, \alpha, \beta, \delta, \mu) =
           \frac{(\gamma/\delta)^\lambda}{\sqrt{2\pi}K_\lambda(\delta \gamma)}
           e^{\beta (x - \mu)} \times \frac{K_{\lambda - 1/2}
           (\alpha \sqrt{\delta^2 + (x - \mu)^2})}
           {(\sqrt{\delta^2 + (x - \mu)^2} / \alpha)^{1/2 - \lambda}}

    for :math:`x \in ( - \infty; \infty)`,
    :math:`\gamma := \sqrt{\alpha^2 - \beta^2}`,
    :math:`\lambda, \mu \in ( - \infty; \infty)`,
    :math:`\delta \ge 0, |\beta| < \alpha` if :math:`\lambda \ge 0`,
    :math:`\delta > 0, |\beta| \le \alpha` if :math:`\lambda < 0`.

    The location-scale-based parameterization implemented in
    SciPy is based on [2]_, where :math:`a = \alpha\delta`,
    :math:`b = \beta\delta`, :math:`p = \lambda`,
    :math:`scale=\delta` and :math:`loc=\mu`
    Moments are implemented based on [3]_ and [4]_.

    For the distributions that are a special case such as Student's t,
    it is not recommended to rely on the implementation of genhyperbolic.
    To avoid potential numerical problems and for performance reasons,
    the methods of the specific distributions should be used.

    References
    ----------
    .. [1] O. Barndorff-Nielsen, "Hyperbolic Distributions and Distributions
       on Hyperbolae", Scandinavian Journal of Statistics, Vol. 5(3),
       pp. 151-157, 1978. https://www.jstor.org/stable/4615705

    .. [2] Eberlein E., Prause K. (2002) The Generalized Hyperbolic Model:
        Financial Derivatives and Risk Measures. In: Geman H., Madan D.,
        Pliska S.R., Vorst T. (eds) Mathematical Finance - Bachelier
        Congress 2000. Springer Finance. Springer, Berlin, Heidelberg.
        :doi:`10.1007/978-3-662-12429-1_12`

    .. [3] Scott, David J, Würtz, Diethelm, Dong, Christine and Tran,
       Thanh Tam, (2009), Moments of the generalized hyperbolic
       distribution, MPRA Paper, University Library of Munich, Germany,
       https://EconPapers.repec.org/RePEc:pra:mprapa:19081.

    .. [4] E. Eberlein and E. A. von Hammerstein. Generalized hyperbolic
       and inverse Gaussian distributions: Limiting cases and approximation
       of processes. FDM Preprint 80, April 2003. University of Freiburg.
       https://freidok.uni-freiburg.de/fedora/objects/freidok:7974/datastreams/FILE1/content

    %(example)s

    """
    def _argcheck(self, p, a, b):
        # |b| < a required for p >= 0; the boundary |b| == a is allowed
        # only for p < 0.
        return (np.logical_and(np.abs(b) < a, p >= 0)
                | np.logical_and(np.abs(b) <= a, p < 0))

    def _logpdf(self, x, p, a, b):
        # kve instead of kv works better for large values of p
        # and smaller values of sqrt(a^2 - b^2)
        # Scalar Cython kernel vectorized over the broadcast arguments.
        @np.vectorize
        def _logpdf_single(x, p, a, b):
            return _stats.genhyperbolic_logpdf(x, p, a, b)

        return _logpdf_single(x, p, a, b)

    def _pdf(self, x, p, a, b):
        # kve instead of kv works better for large values of p
        # and smaller values of sqrt(a^2 - b^2)
        @np.vectorize
        def _pdf_single(x, p, a, b):
            return _stats.genhyperbolic_pdf(x, p, a, b)

        return _pdf_single(x, p, a, b)

    def _cdf(self, x, p, a, b):
        # No closed form: integrate the pdf from -inf to x using a
        # LowLevelCallable around the Cython pdf for speed.
        @np.vectorize
        def _cdf_single(x, p, a, b):
            user_data = np.array(
                [p, a, b], float
                ).ctypes.data_as(ctypes.c_void_p)
            llc = LowLevelCallable.from_cython(
                _stats, '_genhyperbolic_pdf', user_data
                )

            t1 = integrate.quad(llc, -np.inf, x)[0]

            if np.isnan(t1):
                msg = ("Infinite values encountered in scipy.special.kve. "
                       "Values replaced by NaN to avoid incorrect results.")
                warnings.warn(msg, RuntimeWarning)

            return t1

        return _cdf_single(x, p, a, b)

    def _rvs(self, p, a, b, size=None, random_state=None):
        # note: X = b * V + sqrt(V) * X has a
        # generalized hyperbolic distribution
        # if X is standard normal and V is
        # geninvgauss(p = p, b = t2, loc = loc, scale = t3)
        t1 = np.float_power(a, 2) - np.float_power(b, 2)
        # b in the GIG
        t2 = np.float_power(t1, 0.5)
        # scale in the GIG
        t3 = np.float_power(t1, - 0.5)
        gig = geninvgauss.rvs(
            p=p,
            b=t2,
            scale=t3,
            size=size,
            random_state=random_state
            )
        normst = norm.rvs(size=size, random_state=random_state)

        return b * gig + np.sqrt(gig) * normst

    def _stats(self, p, a, b):
        # https://mpra.ub.uni-muenchen.de/19081/1/MPRA_paper_19081.pdf
        # https://freidok.uni-freiburg.de/fedora/objects/freidok:7974/datastreams/FILE1/content
        # standardized moments
        p, a, b = np.broadcast_arrays(p, a, b)
        t1 = np.float_power(a, 2) - np.float_power(b, 2)
        t1 = np.float_power(t1, 0.5)
        # NOTE(review): np.float_power(1, 2) is identically 1 -- presumably
        # delta**2 with delta = 1 in the standardized parameterization;
        # confirm against refs [3]/[4] before simplifying.
        t2 = np.float_power(1, 2) * np.float_power(t1, - 1)
        integers = np.linspace(0, 4, 5)
        # make integers perpendicular to existing dimensions
        integers = integers.reshape(integers.shape + (1,) * p.ndim)
        # Bessel-K ratios r_k = K_{p+k}(t1) / K_p(t1) drive all moments.
        b0, b1, b2, b3, b4 = sc.kv(p + integers, t1)
        r1, r2, r3, r4 = [b / b0 for b in (b1, b2, b3, b4)]

        m = b * t2 * r1
        v = (
            t2 * r1 + np.float_power(b, 2) * np.float_power(t2, 2) *
            (r2 - np.float_power(r1, 2))
            )
        # third central moment, then standardized skewness
        m3e = (
            np.float_power(b, 3) * np.float_power(t2, 3) *
            (r3 - 3 * b2 * b1 * np.float_power(b0, -2) +
             2 * np.float_power(r1, 3)) +
            3 * b * np.float_power(t2, 2) *
            (r2 - np.float_power(r1, 2))
            )
        s = m3e * np.float_power(v, - 3 / 2)
        # fourth central moment, then excess kurtosis
        m4e = (
            np.float_power(b, 4) * np.float_power(t2, 4) *
            (r4 - 4 * b3 * b1 * np.float_power(b0, - 2) +
             6 * b2 * np.float_power(b1, 2) * np.float_power(b0, - 3) -
             3 * np.float_power(r1, 4)) +
            np.float_power(b, 2) * np.float_power(t2, 3) *
            (6 * r3 - 12 * b2 * b1 * np.float_power(b0, - 2) +
             6 * np.float_power(r1, 3)) +
            3 * np.float_power(t2, 2) * r2
            )
        k = m4e * np.float_power(v, -2) - 3

        return m, v, s, k


genhyperbolic = genhyperbolic_gen(name='genhyperbolic')
class gompertz_gen(rv_continuous):
    r"""A Gompertz (or truncated Gumbel) continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `gompertz` is:

    .. math::

        f(x, c) = c \exp(x) \exp(-c (e^x-1))

    for :math:`x \ge 0`, :math:`c > 0`.

    `gompertz` takes ``c`` as a shape parameter for :math:`c`.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x, c):
        # pdf(x, c) = c * exp(x) * exp(-c*(exp(x) - 1)), evaluated via the
        # log-density for numerical stability.
        return np.exp(self._logpdf(x, c))

    def _logpdf(self, x, c):
        # expm1 computes exp(x) - 1 without cancellation near x == 0.
        ex_m1 = sc.expm1(x)
        return np.log(c) + x - c * ex_m1

    def _cdf(self, x, c):
        # cdf(x, c) = 1 - exp(-c*(exp(x) - 1))
        ex_m1 = sc.expm1(x)
        return -sc.expm1(-c * ex_m1)

    def _ppf(self, q, c):
        # Invert the cdf with log1p for accuracy near q == 0 and q == 1.
        return sc.log1p(-1.0 / c * sc.log1p(-q))

    def _entropy(self, c):
        # exp(c) * E1(c) uses the exponential integral (sc.expn, n = 1).
        return 1.0 - np.log(c) - np.exp(c)*sc.expn(1, c)


gompertz = gompertz_gen(a=0.0, name='gompertz')
def _average_with_log_weights(x, logweights):
x = np.asarray(x)
logweights = np.asarray(logweights)
maxlogw = logweights.max()
weights = np.exp(logweights - maxlogw)
return np.average(x, weights=weights)
class gumbel_r_gen(rv_continuous):
    r"""A right-skewed Gumbel continuous random variable.

    %(before_notes)s

    See Also
    --------
    gumbel_l, gompertz, genextreme

    Notes
    -----
    The probability density function for `gumbel_r` is:

    .. math::

        f(x) = \exp(-(x + e^{-x}))

    The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
    distribution. It is also related to the extreme value distribution,
    log-Weibull and Gompertz distributions.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # gumbel_r.pdf(x) = exp(-(x + exp(-x)))
        return np.exp(self._logpdf(x))

    def _logpdf(self, x):
        return -x - np.exp(-x)

    def _cdf(self, x):
        # Double-exponential cdf: exp(-exp(-x)).
        return np.exp(-np.exp(-x))

    def _logcdf(self, x):
        return -np.exp(-x)

    def _ppf(self, q):
        return -np.log(-np.log(q))

    def _sf(self, x):
        # expm1 keeps precision for large x where cdf is close to 1.
        return -sc.expm1(-np.exp(-x))

    def _isf(self, p):
        # log1p keeps precision for small p.
        return -np.log(-np.log1p(-p))

    def _stats(self):
        # mean = Euler-Mascheroni, var = pi^2/6, fixed skew/kurtosis.
        return _EULER, np.pi*np.pi/6.0, 12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5

    def _entropy(self):
        # https://en.wikipedia.org/wiki/Gumbel_distribution
        return _EULER + 1.

    @_call_super_mom
    def fit(self, data, *args, **kwds):
        # Specialized ML fit: solve the scale equation directly instead of
        # running the generic likelihood optimizer.
        data, floc, fscale = _check_fit_input_parameters(self, data,
                                                         args, kwds)

        # if user has provided `floc` or `fscale`, fall back on super fit
        # method. This scenario is not suitable for solving a system of
        # equations
        if floc is not None or fscale is not None:
            return super().fit(data, *args, **kwds)

        # rv_continuous provided guesses
        loc, scale = self._fitstart(data)
        # account for user provided guesses
        loc = kwds.pop('loc', loc)
        scale = kwds.pop('scale', scale)

        # By the method of maximum likelihood, the estimators of the
        # location and scale are the roots of the equation defined in
        # `func` and the value of the expression for `loc` that follows.
        # Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
        # and Peacock (2000), Page 101

        def func(scale, data):
            # Scale MLE residual: mean(x) - weighted mean(x) - scale = 0,
            # with weights exp(-x/scale) (computed stably on the log scale).
            sdata = -data / scale
            wavg = _average_with_log_weights(data, logweights=sdata)
            return data.mean() - wavg - scale

        soln = optimize.root(func, scale, args=(data,),
                             options={'xtol': 1e-14})
        scale = soln.x[0]

        # Location MLE in closed form given the fitted scale (logsumexp
        # avoids overflow of sum(exp(-x/scale))).
        loc = -scale * (sc.logsumexp(-data/scale) - np.log(len(data)))

        return loc, scale


gumbel_r = gumbel_r_gen(name='gumbel_r')
class gumbel_l_gen(rv_continuous):
    r"""A left-skewed Gumbel continuous random variable.

    %(before_notes)s

    See Also
    --------
    gumbel_r, gompertz, genextreme

    Notes
    -----
    The probability density function for `gumbel_l` is:

    .. math::

        f(x) = \exp(x - e^x)

    The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
    distribution. It is also related to the extreme value distribution,
    log-Weibull and Gompertz distributions.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # gumbel_l.pdf(x) = exp(x - exp(x))
        return np.exp(self._logpdf(x))

    def _logpdf(self, x):
        return x - np.exp(x)

    def _cdf(self, x):
        # 1 - exp(-exp(x)), computed without cancellation via expm1.
        return -sc.expm1(-np.exp(x))

    def _ppf(self, q):
        return np.log(-sc.log1p(-q))

    def _logsf(self, x):
        return -np.exp(x)

    def _sf(self, x):
        return np.exp(-np.exp(x))

    def _isf(self, x):
        return np.log(-np.log(x))

    def _stats(self):
        # Mirror image of gumbel_r: mean and skewness change sign.
        return (-_EULER, np.pi*np.pi/6.0,
                -12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5)

    def _entropy(self):
        return _EULER + 1.

    @_call_super_mom
    def fit(self, data, *args, **kwds):
        # If X ~ gumbel_l then -X ~ gumbel_r, so reuse gumbel_r's
        # specialized fit (which also performs the input checks) on the
        # negated data and flip the sign of the fitted location; the scale
        # is unaffected by the reflection.
        loc_mirror, scale_fit = gumbel_r.fit(-np.asarray(data),
                                             *args, **kwds)
        return -loc_mirror, scale_fit


gumbel_l = gumbel_l_gen(name='gumbel_l')
class halfcauchy_gen(rv_continuous):
    r"""A Half-Cauchy continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `halfcauchy` is:

    .. math::

        f(x) = \frac{2}{\pi (1 + x^2)}

    for :math:`x \ge 0`.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # pdf(x) = 2 / (pi * (1 + x**2)): twice the standard Cauchy density
        # folded onto [0, inf).
        return 2.0/np.pi/(1.0+x*x)

    def _logpdf(self, x):
        # log1p(x**2) is accurate for small x.
        return np.log(2.0/np.pi) - sc.log1p(x*x)

    def _cdf(self, x):
        return 2.0/np.pi*np.arctan(x)

    def _ppf(self, p):
        # Inverse of the cdf: tan(pi*p/2).
        return np.tan(np.pi/2*p)

    def _stats(self):
        # Mean and variance diverge; skewness/kurtosis are undefined.
        return np.inf, np.inf, np.nan, np.nan

    def _entropy(self):
        return np.log(2*np.pi)


halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
class halflogistic_gen(rv_continuous):
    r"""A half-logistic continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `halflogistic` is:

    .. math::

        f(x) = \frac{ 2 e^{-x} }{ (1+e^{-x})^2 }
             = \frac{1}{2} \text{sech}(x/2)^2

    for :math:`x \ge 0`.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # pdf(x) = 2*exp(-x)/(1 + exp(-x))**2 = sech(x/2)**2 / 2,
        # evaluated through the stable log-density.
        return np.exp(self._logpdf(x))

    def _logpdf(self, x):
        return np.log(2) - x - 2. * sc.log1p(np.exp(-x))

    def _cdf(self, x):
        # cdf(x) = tanh(x/2)
        return np.tanh(x/2.0)

    def _ppf(self, p):
        return 2*np.arctanh(p)

    def _munp(self, n):
        # First four raw moments in closed form; the general order falls
        # back to the Riemann-zeta expression.
        if n == 1:
            return 2*np.log(2)
        if n == 2:
            return np.pi*np.pi/3.0
        if n == 3:
            return 9*_ZETA3
        if n == 4:
            return 7*np.pi**4 / 15.0
        return 2*(1-pow(2.0, 1-n))*sc.gamma(n+1)*sc.zeta(n, 1)

    def _entropy(self):
        return 2-np.log(2)


halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
class halfnorm_gen(rv_continuous):
    r"""A half-normal continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `halfnorm` is:

    .. math::

        f(x) = \sqrt{2/\pi} \exp(-x^2 / 2)

    for :math:`x >= 0`.

    `halfnorm` is a special case of `chi` with ``df=1``.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, size=None, random_state=None):
        # |Z| for Z standard normal.
        return abs(random_state.standard_normal(size=size))

    def _pdf(self, x):
        # pdf(x) = sqrt(2/pi) * exp(-x**2/2)
        return np.sqrt(2.0/np.pi)*np.exp(-x*x/2.0)

    def _logpdf(self, x):
        return 0.5 * np.log(2.0/np.pi) - x*x/2.0

    def _cdf(self, x):
        # Fold the standard normal cdf onto [0, inf): 2*Phi(x) - 1.
        return _norm_cdf(x)*2-1.0

    def _ppf(self, p):
        # Invert via the normal quantile function.
        return sc.ndtri((1+p)/2.0)

    def _stats(self):
        # Closed-form mean/var/skew/kurtosis of |N(0, 1)|.
        return (np.sqrt(2.0/np.pi),
                1-2.0/np.pi,
                np.sqrt(2)*(4-np.pi)/(np.pi-2)**1.5,
                8*(np.pi-3)/(np.pi-2)**2)

    def _entropy(self):
        return 0.5*np.log(np.pi/2.0)+0.5


halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
class hypsecant_gen(rv_continuous):
    r"""A hyperbolic secant continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `hypsecant` is:

    .. math::

        f(x) = \frac{1}{\pi} \text{sech}(x)

    for a real number :math:`x`.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # pdf(x) = sech(x) / pi
        return 1.0/(np.pi*np.cosh(x))

    def _cdf(self, x):
        # Gudermannian-style cdf: (2/pi) * arctan(exp(x)).
        return 2.0/np.pi*np.arctan(np.exp(x))

    def _ppf(self, p):
        return np.log(np.tan(np.pi*p/2.0))

    def _stats(self):
        # Symmetric about 0: mean 0, var pi^2/4, skew 0, excess kurtosis 2.
        return 0, np.pi*np.pi/4, 0, 2

    def _entropy(self):
        return np.log(2*np.pi)


hypsecant = hypsecant_gen(name='hypsecant')
class gausshyper_gen(rv_continuous):
    r"""A Gauss hypergeometric continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `gausshyper` is:

    .. math::

        f(x, a, b, c, z) = C x^{a-1} (1-x)^{b-1} (1+zx)^{-c}

    for :math:`0 \le x \le 1`, :math:`a > 0`, :math:`b > 0`, :math:`z > -1`,
    and :math:`C = \frac{1}{B(a, b) F[2, 1](c, a; a+b; -z)}`.
    :math:`F[2, 1]` is the Gauss hypergeometric function
    `scipy.special.hyp2f1`.

    `gausshyper` takes :math:`a`, :math:`b`, :math:`c` and :math:`z` as shape
    parameters.

    %(after_notes)s

    References
    ----------
    .. [1] Armero, C., and M. J. Bayarri. "Prior Assessments for Prediction in
           Queues." *Journal of the Royal Statistical Society*. Series D (The
           Statistician) 43, no. 1 (1994): 139-53. doi:10.2307/2348939

    %(example)s

    """
    def _argcheck(self, a, b, c, z):
        # z > -1 per gh-10134; (c == c) only rejects NaN for c.
        return (a > 0) & (b > 0) & (c == c) & (z > -1)

    def _pdf(self, x, a, b, c, z):
        # pdf = C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c), with
        # 1/C = B(a, b) * hyp2f1(c, a; a+b; -z).
        norm_inv = sc.gamma(a)*sc.gamma(b)/sc.gamma(a+b)*sc.hyp2f1(c, a, a+b, -z)
        return 1.0/norm_inv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c

    def _munp(self, n, a, b, c, z):
        # Raw moments as a ratio of beta factors and hypergeometric values.
        beta_ratio = sc.beta(n+a, b) / sc.beta(a, b)
        hyp_num = sc.hyp2f1(c, a+n, a+b+n, -z)
        hyp_den = sc.hyp2f1(c, a, a+b, -z)
        return beta_ratio*hyp_num / hyp_den


gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
class invgamma_gen(rv_continuous):
    r"""An inverted gamma continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `invgamma` is:

    .. math::

        f(x, a) = \frac{x^{-a-1}}{\Gamma(a)} \exp(-\frac{1}{x})

    for :math:`x >= 0`, :math:`a > 0`. :math:`\Gamma` is the gamma function
    (`scipy.special.gamma`).

    `invgamma` takes ``a`` as a shape parameter for :math:`a`.

    `invgamma` is a special case of `gengamma` with ``c=-1``, and it is a
    different parameterization of the scaled inverse chi-squared distribution.
    Specifically, if the scaled inverse chi-squared distribution is
    parameterized with degrees of freedom :math:`\nu` and scaling parameter
    :math:`\tau^2`, then it can be modeled using `invgamma` with
    ``a=`` :math:`\nu/2` and ``scale=`` :math:`\nu \tau^2/2`.

    %(after_notes)s

    %(example)s

    """
    # x == 0 is excluded from the support (density involves 1/x).
    _support_mask = rv_continuous._open_support_mask

    def _pdf(self, x, a):
        # pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
        return np.exp(self._logpdf(x, a))

    def _logpdf(self, x, a):
        return -(a+1) * np.log(x) - sc.gammaln(a) - 1.0/x

    def _cdf(self, x, a):
        # If X ~ invgamma(a) then 1/X ~ gamma(a), hence the mirrored
        # incomplete-gamma calls in cdf/sf/ppf/isf.
        return sc.gammaincc(a, 1.0 / x)

    def _ppf(self, q, a):
        return 1.0 / sc.gammainccinv(a, q)

    def _sf(self, x, a):
        return sc.gammainc(a, 1.0 / x)

    def _isf(self, q, a):
        return 1.0 / sc.gammaincinv(a, q)

    def _stats(self, a, moments='mvsk'):
        # Moments exist only for large enough shape: mean a > 1,
        # variance a > 2, skewness a > 3, kurtosis a > 4.
        m1 = _lazywhere(a > 1, (a,), lambda s: 1. / (s - 1.), np.inf)
        m2 = _lazywhere(a > 2, (a,), lambda s: 1. / (s - 1.)**2 / (s - 2.),
                        np.inf)
        g1, g2 = None, None
        if 's' in moments:
            g1 = _lazywhere(
                a > 3, (a,),
                lambda s: 4. * np.sqrt(s - 2.) / (s - 3.), np.nan)
        if 'k' in moments:
            g2 = _lazywhere(
                a > 4, (a,),
                lambda s: 6. * (5. * s - 11.) / (s - 3.) / (s - 4.), np.nan)
        return m1, m2, g1, g2

    def _entropy(self, a):
        return a - (a+1.0) * sc.psi(a) + sc.gammaln(a)


invgamma = invgamma_gen(a=0.0, name='invgamma')
class invgauss_gen(rv_continuous):
    r"""An inverse Gaussian continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `invgauss` is:

    .. math::

        f(x, \mu) = \frac{1}{\sqrt{2 \pi x^3}}
                    \exp(-\frac{(x-\mu)^2}{2 x \mu^2})

    for :math:`x >= 0` and :math:`\mu > 0`.

    `invgauss` takes ``mu`` as a shape parameter for :math:`\mu`.

    %(after_notes)s

    %(example)s

    """
    _support_mask = rv_continuous._open_support_mask

    def _rvs(self, mu, size=None, random_state=None):
        # The Wald sampler with lambda=1 is exactly invgauss(mu).
        return random_state.wald(mu, 1.0, size=size)

    def _pdf(self, x, mu):
        # invgauss.pdf(x, mu) =
        #     1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
        return 1.0/np.sqrt(2*np.pi*x**3.0)*np.exp(-1.0/(2*x)*((x-mu)/mu)**2)

    def _logpdf(self, x, mu):
        return -0.5*np.log(2*np.pi) - 1.5*np.log(x) - ((x-mu)/mu)**2/(2*x)

    # approach adapted from equations in
    # https://journal.r-project.org/archive/2016-1/giner-smyth.pdf,
    # not R code. see gh-13616

    def _logcdf(self, x, mu):
        fac = 1 / np.sqrt(x)
        a = _norm_logcdf(fac * ((x / mu) - 1))
        b = 2 / mu + _norm_logcdf(-fac * ((x / mu) + 1))
        # log(exp(a) + exp(b)) evaluated stably via log1p
        return a + np.log1p(np.exp(b - a))

    def _logsf(self, x, mu):
        fac = 1 / np.sqrt(x)
        a = _norm_logsf(fac * ((x / mu) - 1))
        b = 2 / mu + _norm_logcdf(-fac * (x + mu) / mu)
        # log(exp(a) - exp(b)) evaluated stably via log1p
        return a + np.log1p(-np.exp(b - a))

    def _sf(self, x, mu):
        return np.exp(self._logsf(x, mu))

    def _cdf(self, x, mu):
        return np.exp(self._logcdf(x, mu))

    def _stats(self, mu):
        # mean, variance, skewness, excess kurtosis
        return mu, mu**3.0, 3*np.sqrt(mu), 15*mu

    def fit(self, data, *args, **kwds):
        """Fit `invgauss` using closed-form MLE formulas when possible.

        Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
        and Peacock (2000), Page 121. Their shape parameter is equivalent
        to SciPy's with the conversion ``fshape_s = fshape / scale``.

        The MLE formulas are not used in these conditions, which fall
        back on the superclass fit method:

        - `loc` is not fixed
        - `mu` is fixed

        If `loc` is fixed but the translation results in negative data,
        a `FitDataError` is raised.
        """
        # NOTE: the explanation above previously lived in the body as a
        # no-op triple-quoted string; it is now a proper docstring.
        method = kwds.get('method', 'mle')

        if type(self) == wald_gen or method.lower() == 'mm':
            return super().fit(data, *args, **kwds)

        data, fshape_s, floc, fscale = _check_fit_input_parameters(self, data,
                                                                   args, kwds)
        if floc is None or fshape_s is not None:
            # loc free, or mu fixed: no closed form applies.
            return super().fit(data, *args, **kwds)
        elif np.any(data - floc < 0):
            raise FitDataError("invgauss", lower=0, upper=np.inf)
        else:
            data = data - floc
            fshape_n = np.mean(data)
            if fscale is None:
                fscale = len(data) / (np.sum(data ** -1 - fshape_n ** -1))
            fshape_s = fshape_n / fscale
        return fshape_s, floc, fscale


invgauss = invgauss_gen(a=0.0, name='invgauss')
class geninvgauss_gen(rv_continuous):
    r"""A Generalized Inverse Gaussian continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `geninvgauss` is:

    .. math::

        f(x, p, b) = x^{p-1} \exp(-b (x + 1/x) / 2) / (2 K_p(b))

    where `x > 0`, and the parameters `p, b` satisfy `b > 0` ([1]_).

    :math:`K_p` is the modified Bessel function of second kind of order `p`
    (`scipy.special.kv`).

    %(after_notes)s

    The inverse Gaussian distribution `stats.invgauss(mu)` is a special case of
    `geninvgauss` with `p = -1/2`, `b = 1 / mu` and `scale = mu`.

    Generating random variates is challenging for this distribution. The
    implementation is based on [2]_.

    References
    ----------
    .. [1] O. Barndorff-Nielsen, P. Blaesild, C. Halgreen, "First hitting time
           models for the generalized inverse gaussian distribution",
           Stochastic Processes and their Applications 7, pp. 49--54, 1978.

    .. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian
           random variates", Statistics and Computing, 24(4), p. 547--557, 2014.

    %(example)s

    """
    def _argcheck(self, p, b):
        # p may be any real number (p == p rejects NaN); b must be positive
        return (p == p) & (b > 0)

    def _logpdf(self, x, p, b):
        # kve instead of kv works better for large values of b
        # warn if kve produces infinite values and replace by nan
        # otherwise c = -inf and the results are often incorrect
        @np.vectorize
        def logpdf_single(x, p, b):
            return _stats.geninvgauss_logpdf(x, p, b)

        z = logpdf_single(x, p, b)
        if np.isnan(z).any():
            msg = ("Infinite values encountered in scipy.special.kve(p, b). "
                   "Values replaced by NaN to avoid incorrect results.")
            warnings.warn(msg, RuntimeWarning)
        return z

    def _pdf(self, x, p, b):
        # relying on logpdf avoids overflow of x**(p-1) for large x and p
        return np.exp(self._logpdf(x, p, b))

    def _cdf(self, x, *args):
        # No closed form: integrate the Cython pdf numerically from the
        # left support endpoint up to x, one point at a time.
        _a, _b = self._get_support(*args)

        @np.vectorize
        def _cdf_single(x, *args):
            p, b = args
            user_data = np.array([p, b], float).ctypes.data_as(ctypes.c_void_p)
            llc = LowLevelCallable.from_cython(_stats, '_geninvgauss_pdf',
                                               user_data)

            return integrate.quad(llc, _a, x)[0]

        return _cdf_single(x, *args)

    def _logquasipdf(self, x, p, b):
        # log of the quasi-density (w/o normalizing constant) used in _rvs
        return _lazywhere(x > 0, (x, p, b),
                          lambda x, p, b: (p - 1)*np.log(x) - b*(x + 1/x)/2,
                          -np.inf)

    def _rvs(self, p, b, size=None, random_state=None):
        # if p and b are scalar, use _rvs_scalar, otherwise need to create
        # output by iterating over parameters
        if np.isscalar(p) and np.isscalar(b):
            out = self._rvs_scalar(p, b, size, random_state)
        elif p.size == 1 and b.size == 1:
            out = self._rvs_scalar(p.item(), b.item(), size, random_state)
        else:
            # When this method is called, size will be a (possibly empty)
            # tuple of integers.  It will not be None; if `size=None` is passed
            # to `rvs()`, size will be the empty tuple ().

            p, b = np.broadcast_arrays(p, b)
            # p and b now have the same shape.

            # `shp` is the shape of the blocks of random variates that are
            # generated for each combination of parameters associated with
            # broadcasting p and b.
            # bc is a tuple the same length as size.  The values
            # in bc are bools.  If bc[j] is True, it means that
            # entire axis is filled in for a given combination of the
            # broadcast arguments.
            shp, bc = _check_shape(p.shape, size)

            # `numsamples` is the total number of variates to be generated
            # for each combination of the input arguments.
            numsamples = int(np.prod(shp))

            # `out` is the array to be returned.  It is filled in in the
            # loop below.
            out = np.empty(size)

            it = np.nditer([p, b],
                           flags=['multi_index'],
                           op_flags=[['readonly'], ['readonly']])
            while not it.finished:
                # Convert the iterator's multi_index into an index into the
                # `out` array where the call to _rvs_scalar() will be stored.
                # Where bc is True, we use a full slice; otherwise we use the
                # index value from it.multi_index.  len(it.multi_index) might
                # be less than len(bc), and in that case we want to align these
                # two sequences to the right, so the loop variable j runs from
                # -len(size) to 0.  This doesn't cause an IndexError, as
                # bc[j] will be True in those cases where it.multi_index[j]
                # would cause an IndexError.
                idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
                            for j in range(-len(size), 0))
                out[idx] = self._rvs_scalar(it[0], it[1], numsamples,
                                            random_state).reshape(shp)
                it.iternext()

        if size == ():
            out = out.item()
        return out

    def _rvs_scalar(self, p, b, numsamples, random_state):
        """Draw `numsamples` variates for scalar p, b via the methods of [2]."""
        # following [2], the quasi-pdf is used instead of the pdf for the
        # generation of rvs
        invert_res = False
        if not(numsamples):
            numsamples = 1
        if p < 0:
            # note: if X is geninvgauss(p, b), then 1/X is geninvgauss(-p, b)
            p = -p
            invert_res = True
        m = self._mode(p, b)

        # determine method to be used following [2]
        ratio_unif = True
        if p >= 1 or b > 1:
            # ratio of uniforms with mode shift below
            mode_shift = True
        elif b >= min(0.5, 2 * np.sqrt(1 - p) / 3):
            # ratio of uniforms without mode shift below
            mode_shift = False
        else:
            # new algorithm in [2]
            ratio_unif = False

        # prepare sampling of rvs
        size1d = tuple(np.atleast_1d(numsamples))
        N = np.prod(size1d)  # number of rvs needed, reshape upon return
        x = np.zeros(N)
        simulated = 0

        if ratio_unif:
            # use ratio of uniforms method
            if mode_shift:
                a2 = -2 * (p + 1) / b - m
                a1 = 2 * m * (p - 1) / b - 1
                # find roots of x**3 + a2*x**2 + a1*x + m (Cardano's formula)
                p1 = a1 - a2**2 / 3
                q1 = 2 * a2**3 / 27 - a2 * a1 / 3 + m
                phi = np.arccos(-q1 * np.sqrt(-27 / p1**3) / 2)
                s1 = -np.sqrt(-4 * p1 / 3)
                root1 = s1 * np.cos(phi / 3 + np.pi / 3) - a2 / 3
                root2 = -s1 * np.cos(phi / 3) - a2 / 3
                # root3 = s1 * np.cos(phi / 3 - np.pi / 3) - a2 / 3

                # if g is the quasipdf, rescale: g(x) / g(m) which we can write
                # as exp(log(g(x)) - log(g(m))). This is important
                # since for large values of p and b, g cannot be evaluated.
                # denote the rescaled quasipdf by h
                lm = self._logquasipdf(m, p, b)
                d1 = self._logquasipdf(root1, p, b) - lm
                d2 = self._logquasipdf(root2, p, b) - lm
                # compute the bounding rectangle w.r.t. h. Note that
                # np.exp(0.5*d1) = np.sqrt(g(root1)/g(m)) = np.sqrt(h(root1))
                vmin = (root1 - m) * np.exp(0.5 * d1)
                vmax = (root2 - m) * np.exp(0.5 * d2)
                umax = 1  # umax = sqrt(h(m)) = 1
                logqpdf = lambda x: self._logquasipdf(x, p, b) - lm
                c = m
            else:
                # ratio of uniforms without mode shift
                # compute np.sqrt(quasipdf(m))
                umax = np.exp(0.5*self._logquasipdf(m, p, b))
                xplus = ((1 + p) + np.sqrt((1 + p)**2 + b**2))/b
                vmin = 0
                # compute xplus * np.sqrt(quasipdf(xplus))
                vmax = xplus * np.exp(0.5 * self._logquasipdf(xplus, p, b))
                c = 0
                logqpdf = lambda x: self._logquasipdf(x, p, b)

            if vmin >= vmax:
                raise ValueError("vmin must be smaller than vmax.")
            if umax <= 0:
                raise ValueError("umax must be positive.")

            i = 1
            while simulated < N:
                k = N - simulated
                # simulate uniform rvs on [0, umax] and [vmin, vmax]
                u = umax * random_state.uniform(size=k)
                v = random_state.uniform(size=k)
                v = vmin + (vmax - vmin) * v
                rvs = v / u + c
                # rewrite acceptance condition u**2 <= pdf(rvs) by taking logs
                accept = (2*np.log(u) <= logqpdf(rvs))
                num_accept = np.sum(accept)
                if num_accept > 0:
                    x[simulated:(simulated + num_accept)] = rvs[accept]
                    simulated += num_accept

                if (simulated == 0) and (i*N >= 50000):
                    msg = ("Not a single random variate could be generated "
                           "in {} attempts. Sampling does not appear to "
                           "work for the provided parameters.".format(i*N))
                    raise RuntimeError(msg)
                i += 1
        else:
            # use new algorithm in [2]
            x0 = b / (1 - p)
            xs = np.max((x0, 2 / b))
            k1 = np.exp(self._logquasipdf(m, p, b))
            A1 = k1 * x0
            if x0 < 2 / b:
                k2 = np.exp(-b)
                if p > 0:
                    A2 = k2 * ((2 / b)**p - x0**p) / p
                else:
                    A2 = k2 * np.log(2 / b**2)
            else:
                k2, A2 = 0, 0
            k3 = xs**(p - 1)
            A3 = 2 * k3 * np.exp(-xs * b / 2) / b
            A = A1 + A2 + A3
            # [2]: rejection constant is < 2.73; so expected runtime is finite
            while simulated < N:
                k = N - simulated
                h, rvs = np.zeros(k), np.zeros(k)
                # simulate uniform rvs on [x1, x2] and [0, y2]
                u = random_state.uniform(size=k)
                v = A * random_state.uniform(size=k)
                cond1 = v <= A1
                cond2 = np.logical_not(cond1) & (v <= A1 + A2)
                cond3 = np.logical_not(cond1 | cond2)
                # subdomain (0, x0)
                rvs[cond1] = x0 * v[cond1] / A1
                h[cond1] = k1
                # subdomain (x0, 2 / b)
                if p > 0:
                    rvs[cond2] = (x0**p + (v[cond2] - A1) * p / k2)**(1 / p)
                else:
                    rvs[cond2] = b * np.exp((v[cond2] - A1) * np.exp(b))
                h[cond2] = k2 * rvs[cond2]**(p - 1)
                # subdomain (xs, infinity)
                z = np.exp(-xs * b / 2) - b * (v[cond3] - A1 - A2) / (2 * k3)
                rvs[cond3] = -2 / b * np.log(z)
                h[cond3] = k3 * np.exp(-rvs[cond3] * b / 2)
                # apply rejection method
                accept = (np.log(u * h) <= self._logquasipdf(rvs, p, b))
                num_accept = sum(accept)
                if num_accept > 0:
                    x[simulated:(simulated + num_accept)] = rvs[accept]
                    simulated += num_accept

        rvs = np.reshape(x, size1d)
        if invert_res:
            # undo the p -> -p reflection applied above
            rvs = 1 / rvs
        return rvs

    def _mode(self, p, b):
        # distinguish cases to avoid catastrophic cancellation (see [2])
        if p < 1:
            return b / (np.sqrt((p - 1)**2 + b**2) + 1 - p)
        else:
            return (np.sqrt((1 - p)**2 + b**2) - (1 - p)) / b

    def _munp(self, n, p, b):
        # n-th raw moment as a ratio of (exponentially scaled) Bessel K values
        num = sc.kve(p + n, b)
        denom = sc.kve(p, b)
        inf_vals = np.isinf(num) | np.isinf(denom)
        if inf_vals.any():
            msg = ("Infinite values encountered in the moment calculation "
                   "involving scipy.special.kve. Values replaced by NaN to "
                   "avoid incorrect results.")
            warnings.warn(msg, RuntimeWarning)
            m = np.full_like(num, np.nan, dtype=np.double)
            m[~inf_vals] = num[~inf_vals] / denom[~inf_vals]
        else:
            m = num / denom
        return m


geninvgauss = geninvgauss_gen(a=0.0, name="geninvgauss")
class norminvgauss_gen(rv_continuous):
    r"""A Normal Inverse Gaussian continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `norminvgauss` is:

    .. math::

        f(x, a, b) = \frac{a \, K_1(a \sqrt{1 + x^2})}{\pi \sqrt{1 + x^2}} \,
                     \exp(\sqrt{a^2 - b^2} + b x)

    where :math:`x` is a real number, the parameter :math:`a` is the tail
    heaviness and :math:`b` is the asymmetry parameter satisfying
    :math:`a > 0` and :math:`|b| <= a`.
    :math:`K_1` is the modified Bessel function of second kind
    (`scipy.special.k1`).

    %(after_notes)s

    A normal inverse Gaussian random variable `Y` with parameters `a` and `b`
    can be expressed as a normal mean-variance mixture:
    `Y = b * V + sqrt(V) * X` where `X` is `norm(0,1)` and `V` is
    `invgauss(mu=1/sqrt(a**2 - b**2))`. This representation is used
    to generate random variates.

    Another common parametrization of the distribution (see Equation 2.1 in
    [2]_) is given by the following expression of the pdf:

    .. math::

        g(x, \alpha, \beta, \delta, \mu) =
        \frac{\alpha\delta K_1\left(\alpha\sqrt{\delta^2 + (x - \mu)^2}\right)}
        {\pi \sqrt{\delta^2 + (x - \mu)^2}} \,
        e^{\delta \sqrt{\alpha^2 - \beta^2} + \beta (x - \mu)}

    In SciPy, this corresponds to
    `a = alpha * delta, b = beta * delta, loc = mu, scale=delta`.

    References
    ----------
    .. [1] O. Barndorff-Nielsen, "Hyperbolic Distributions and Distributions on
           Hyperbolae", Scandinavian Journal of Statistics, Vol. 5(3),
           pp. 151-157, 1978.

    .. [2] O. Barndorff-Nielsen, "Normal Inverse Gaussian Distributions and
           Stochastic Volatility Modelling", Scandinavian Journal of
           Statistics, Vol. 24, pp. 1-13, 1997.

    %(example)s

    """
    _support_mask = rv_continuous._open_support_mask

    def _argcheck(self, a, b):
        # tail-heaviness a must be positive and strictly dominate |b|
        return (a > 0) & (np.absolute(b) < a)

    def _pdf(self, x, a, b):
        gam = np.sqrt(a**2 - b**2)
        prefactor = a / np.pi * np.exp(gam)
        hyp = np.hypot(1, x)  # sqrt(1 + x**2) without overflow
        # k1e is the exponentially scaled K_1; exp(-a*hyp) below undoes
        # the scaling.
        return prefactor * sc.k1e(a * hyp) * np.exp(b*x - a*hyp) / hyp

    def _rvs(self, a, b, size=None, random_state=None):
        # normal mean-variance mixture: Y = b*V + sqrt(V)*X is
        # norminvgauss(a, b) when X is standard normal and V is
        # invgauss(mu=1/sqrt(a**2 - b**2))
        gam = np.sqrt(a**2 - b**2)
        mix = invgauss.rvs(mu=1/gam, size=size, random_state=random_state)
        return b * mix + np.sqrt(mix) * norm.rvs(size=size,
                                                 random_state=random_state)

    def _stats(self, a, b):
        gam = np.sqrt(a**2 - b**2)
        mean = b / gam
        variance = a**2 / gam**3
        skewness = 3.0 * b / (a * np.sqrt(gam))
        kurtosis = 3.0 * (1 + 4 * b**2 / a**2) / gam
        return mean, variance, skewness, kurtosis


norminvgauss = norminvgauss_gen(name="norminvgauss")
class invweibull_gen(rv_continuous):
    r"""An inverted Weibull continuous random variable.

    This distribution is also known as the Fréchet distribution or the
    type II extreme value distribution.

    %(before_notes)s

    Notes
    -----
    The probability density function for `invweibull` is:

    .. math::

        f(x, c) = c x^{-c-1} \exp(-x^{-c})

    for :math:`x > 0`, :math:`c > 0`.

    `invweibull` takes ``c`` as a shape parameter for :math:`c`.

    %(after_notes)s

    References
    ----------
    F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
    Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.

    %(example)s

    """
    _support_mask = rv_continuous._open_support_mask

    def _pdf(self, x, c):
        # invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
        xc1 = np.power(x, -c - 1.0)
        xc2 = np.power(x, -c)
        xc2 = np.exp(-xc2)
        return c * xc1 * xc2

    def _cdf(self, x, c):
        xc1 = np.power(x, -c)
        return np.exp(-xc1)

    def _ppf(self, q, c):
        return np.power(-np.log(q), -1.0/c)

    def _munp(self, n, c):
        # n-th raw moment; finite only for n < c
        return sc.gamma(1 - n / c)

    def _entropy(self, c):
        return 1 + _EULER + _EULER / c - np.log(c)

    def _fitstart(self, data, args=None):
        # invweibull requires c > 1 for the first moment to exist, so use 2.0
        args = (2.0,) if args is None else args
        # zero-argument super() for consistency with the other fit/fitstart
        # overrides in this module
        return super()._fitstart(data, args=args)


invweibull = invweibull_gen(a=0, name='invweibull')
class johnsonsb_gen(rv_continuous):
    r"""A Johnson SB continuous random variable.

    %(before_notes)s

    See Also
    --------
    johnsonsu

    Notes
    -----
    The probability density function for `johnsonsb` is:

    .. math::

        f(x, a, b) = \frac{b}{x(1-x)}  \phi(a + b \log \frac{x}{1-x} )

    where :math:`x`, :math:`a`, and :math:`b` are real scalars; :math:`b > 0`
    and :math:`x \in [0,1]`.  :math:`\phi` is the pdf of the normal
    distribution.

    `johnsonsb` takes :math:`a` and :math:`b` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    _support_mask = rv_continuous._open_support_mask

    def _argcheck(self, a, b):
        # a may be any real (a == a rejects NaN); b must be positive
        return (b > 0) & (a == a)

    def _pdf(self, x, a, b):
        # johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
        phi_term = _norm_pdf(a + b*np.log(x/(1.0-x)))
        return b*1.0/(x*(1-x))*phi_term

    def _cdf(self, x, a, b):
        return _norm_cdf(a + b*np.log(x/(1.0-x)))

    def _ppf(self, q, a, b):
        # invert the cdf: logistic transform of the shifted normal quantile
        return 1.0 / (1 + np.exp(-1.0 / b * (_norm_ppf(q) - a)))


johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
class johnsonsu_gen(rv_continuous):
    r"""A Johnson SU continuous random variable.

    %(before_notes)s

    See Also
    --------
    johnsonsb

    Notes
    -----
    The probability density function for `johnsonsu` is:

    .. math::

        f(x, a, b) = \frac{b}{\sqrt{x^2 + 1}}
                     \phi(a + b \log(x + \sqrt{x^2 + 1}))

    where :math:`x`, :math:`a`, and :math:`b` are real scalars; :math:`b > 0`.
    :math:`\phi` is the pdf of the normal distribution.

    `johnsonsu` takes :math:`a` and :math:`b` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, a, b):
        # a may be any real (a == a rejects NaN); b must be positive
        return (b > 0) & (a == a)

    def _pdf(self, x, a, b):
        # johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
        #                          phi(a + b * log(x + sqrt(x**2 + 1)))
        xsq = x*x
        phi_term = _norm_pdf(a + b * np.log(x + np.sqrt(xsq+1)))
        return b*1.0/np.sqrt(xsq+1.0)*phi_term

    def _cdf(self, x, a, b):
        # note: log(x + sqrt(x**2 + 1)) == arcsinh(x)
        return _norm_cdf(a + b * np.log(x + np.sqrt(x*x + 1)))

    def _ppf(self, q, a, b):
        # sinh inverts the arcsinh transform of the cdf
        return np.sinh((_norm_ppf(q) - a) / b)


johnsonsu = johnsonsu_gen(name='johnsonsu')
class laplace_gen(rv_continuous):
    r"""A Laplace continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `laplace` is

    .. math::

        f(x) = \frac{1}{2} \exp(-|x|)

    for a real number :math:`x`.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, size=None, random_state=None):
        return random_state.laplace(0, 1, size=size)

    def _pdf(self, x):
        # laplace.pdf(x) = 1/2 * exp(-abs(x))
        return 0.5*np.exp(-abs(x))

    def _cdf(self, x):
        # exp(-x) can overflow for very negative x, but the result of the
        # losing where-branch is discarded, so silence the warning.
        with np.errstate(over='ignore'):
            return np.where(x > 0, 1.0 - 0.5*np.exp(-x), 0.5*np.exp(x))

    def _sf(self, x):
        # symmetry about zero: sf(x) = cdf(-x)
        return self._cdf(-x)

    def _ppf(self, q):
        return np.where(q > 0.5, -np.log(2*(1-q)), np.log(2*q))

    def _isf(self, q):
        # symmetry about zero: isf(q) = -ppf(q)
        return -self._ppf(q)

    def _stats(self):
        return 0, 2, 0, 3

    def _entropy(self):
        return np.log(2)+1

    @_call_super_mom
    @replace_notes_in_docstring(rv_continuous, notes="""\
        This function uses explicit formulas for the maximum likelihood
        estimation of the Laplace distribution parameters, so the keyword
        arguments `loc`, `scale`, and `optimizer` are ignored.\n\n""")
    def fit(self, data, *args, **kwds):
        data, floc, fscale = _check_fit_input_parameters(self, data,
                                                         args, kwds)

        # Closed-form MLE: median for the location, mean absolute deviation
        # about it for the scale.
        # Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
        # and Peacock (2000), Page 124.
        if floc is None:
            floc = np.median(data)
        if fscale is None:
            fscale = (np.sum(np.abs(data - floc))) / len(data)

        return floc, fscale


laplace = laplace_gen(name='laplace')
class laplace_asymmetric_gen(rv_continuous):
    r"""An asymmetric Laplace continuous random variable.

    %(before_notes)s

    See Also
    --------
    laplace : Laplace distribution

    Notes
    -----
    The probability density function for `laplace_asymmetric` is

    .. math::

       f(x, \kappa) &= \frac{1}{\kappa+\kappa^{-1}}\exp(-x\kappa),\quad x\ge0\\
                    &= \frac{1}{\kappa+\kappa^{-1}}\exp(x/\kappa),\quad x<0\\

    for :math:`-\infty < x < \infty`, :math:`\kappa > 0`.

    `laplace_asymmetric` takes ``kappa`` as a shape parameter for
    :math:`\kappa`. For :math:`\kappa = 1`, it is identical to a
    Laplace distribution.

    %(after_notes)s

    References
    ----------
    .. [1] "Asymmetric Laplace distribution", Wikipedia
            https://en.wikipedia.org/wiki/Asymmetric_Laplace_distribution

    .. [2] Kozubowski TJ and Podgórski K. A Multivariate and
           Asymmetric Generalization of Laplace Distribution,
           Computational Statistics 15, 531--540 (2000).
           :doi:`10.1007/PL00022717`

    %(example)s

    """
    def _pdf(self, x, kappa):
        # evaluated through the log-density
        return np.exp(self._logpdf(x, kappa))

    def _logpdf(self, x, kappa):
        inv_kappa = 1/kappa
        # exponential decay rate is kappa on the right tail, 1/kappa on
        # the left tail
        logpx = x * np.where(x >= 0, -kappa, inv_kappa)
        logpx -= np.log(kappa+inv_kappa)
        return logpx

    def _cdf(self, x, kappa):
        inv_kappa = 1/kappa
        ksum = kappa+inv_kappa
        return np.where(x >= 0,
                        1 - np.exp(-x*kappa)*(inv_kappa/ksum),
                        np.exp(x*inv_kappa)*(kappa/ksum))

    def _sf(self, x, kappa):
        inv_kappa = 1/kappa
        ksum = kappa+inv_kappa
        return np.where(x >= 0,
                        np.exp(-x*kappa)*(inv_kappa/ksum),
                        1 - np.exp(x*inv_kappa)*(kappa/ksum))

    def _ppf(self, q, kappa):
        inv_kappa = 1/kappa
        ksum = kappa+inv_kappa
        # the kink of the cdf sits at probability kappa/(kappa + 1/kappa)
        return np.where(q >= kappa/ksum,
                        -np.log((1 - q)*ksum*kappa)*inv_kappa,
                        np.log(q*ksum/kappa)*kappa)

    def _isf(self, q, kappa):
        inv_kappa = 1/kappa
        ksum = kappa+inv_kappa
        return np.where(q <= inv_kappa/ksum,
                        -np.log(q*ksum*kappa)*inv_kappa,
                        np.log((1 - q)*ksum/kappa)*kappa)

    def _stats(self, kappa):
        inv_kappa = 1/kappa
        mean = inv_kappa - kappa
        var = inv_kappa*inv_kappa + kappa*kappa
        skew = 2.0*(1-np.power(kappa, 6))/np.power(1+np.power(kappa, 4), 1.5)
        kurt = 6.0*(1+np.power(kappa, 8))/np.power(1+np.power(kappa, 4), 2)
        return mean, var, skew, kurt

    def _entropy(self, kappa):
        return 1 + np.log(kappa+1/kappa)


laplace_asymmetric = laplace_asymmetric_gen(name='laplace_asymmetric')
def _check_fit_input_parameters(dist, data, args, kwds):
    # Shared validation helper for the analytical ``fit`` overrides above.
    # Returns ``(data, *fixed_shapes, floc, fscale)``, where each fixed
    # parameter is either the user-supplied value or None if free.
    # NOTE: normalized fixed-shape keys are written back into `kwds`.
    data = np.asarray(data)
    floc = kwds.get('floc', None)
    fscale = kwds.get('fscale', None)

    num_shapes = len(dist.shapes.split(",")) if dist.shapes else 0
    fshape_keys = []
    fshapes = []

    # user has many options for fixing the shape, so here we standardize it
    # into 'f' + the number of the shape.
    # Adapted from `_reduce_func` in `_distn_infrastructure.py`:
    if dist.shapes:
        shapes = dist.shapes.replace(',', ' ').split()
        for j, s in enumerate(shapes):
            key = 'f' + str(j)
            names = [key, 'f' + s, 'fix_' + s]
            val = _get_fixed_fit_value(kwds, names)
            fshape_keys.append(key)
            fshapes.append(val)
            if val is not None:
                # write the canonical spelling back so downstream code
                # sees a single key per fixed shape
                kwds[key] = val

    # determine if there are any unknown arguments in kwds
    known_keys = {'loc', 'scale', 'optimizer', 'method',
                  'floc', 'fscale', *fshape_keys}
    unknown_keys = set(kwds).difference(known_keys)
    if unknown_keys:
        raise TypeError(f"Unknown keyword arguments: {unknown_keys}.")

    if len(args) > num_shapes:
        raise TypeError("Too many positional arguments.")

    if None not in {floc, fscale, *fshapes}:
        # This check is for consistency with `rv_continuous.fit`.
        # Without this check, this function would just return the
        # parameters that were given.
        raise RuntimeError("All parameters fixed. There is nothing to "
                           "optimize.")

    if not np.isfinite(data).all():
        raise RuntimeError("The data contains non-finite values.")

    return (data, *fshapes, floc, fscale)
class levy_gen(rv_continuous):
    r"""A Levy continuous random variable.

    %(before_notes)s

    See Also
    --------
    levy_stable, levy_l

    Notes
    -----
    The probability density function for `levy` is:

    .. math::

        f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp\left(-\frac{1}{2x}\right)

    for :math:`x >= 0`.

    This is the same as the Levy-stable distribution with :math:`a=1/2` and
    :math:`b=1`.

    %(after_notes)s

    %(example)s

    """
    _support_mask = rv_continuous._open_support_mask

    def _pdf(self, x):
        # levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
        return 1 / np.sqrt(2*np.pi*x) / x * np.exp(-1/(2*x))

    def _cdf(self, x):
        # Equivalent to 2*norm.sf(np.sqrt(1/x))
        return sc.erfc(np.sqrt(0.5 / x))

    def _sf(self, x):
        return sc.erf(np.sqrt(0.5 / x))

    def _ppf(self, q):
        # Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2)
        z = -sc.ndtri(q/2)
        return 1.0 / (z * z)

    def _stats(self):
        # mean and variance diverge; skew/kurtosis are undefined
        return np.inf, np.inf, np.nan, np.nan


levy = levy_gen(a=0.0, name="levy")
class levy_l_gen(rv_continuous):
    r"""A left-skewed Levy continuous random variable.

    %(before_notes)s

    See Also
    --------
    levy, levy_stable

    Notes
    -----
    The probability density function for `levy_l` is:

    .. math::
        f(x) = \frac{1}{|x| \sqrt{2\pi |x|}} \exp{ \left(-\frac{1}{2|x|} \right)}

    for :math:`x <= 0`.

    This is the same as the Levy-stable distribution with :math:`a=1/2` and
    :math:`b=-1`.

    %(after_notes)s

    %(example)s

    """
    _support_mask = rv_continuous._open_support_mask

    def _pdf(self, x):
        # levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
        absx = abs(x)
        return 1/np.sqrt(2*np.pi*absx)/absx*np.exp(-1/(2*absx))

    def _cdf(self, x):
        # mirror image of `levy`: cdf(x) = 2*norm.cdf(1/sqrt(|x|)) - 1
        absx = abs(x)
        return 2 * _norm_cdf(1 / np.sqrt(absx)) - 1

    def _sf(self, x):
        absx = abs(x)
        return 2 * _norm_sf(1 / np.sqrt(absx))

    def _ppf(self, q):
        z = _norm_ppf((q + 1.0) / 2)
        return -1.0 / (z * z)

    def _isf(self, p):
        return -1/_norm_isf(p/2)**2

    def _stats(self):
        # mean and variance diverge; skew/kurtosis are undefined
        return np.inf, np.inf, np.nan, np.nan


levy_l = levy_l_gen(b=0.0, name="levy_l")
class logistic_gen(rv_continuous):
    r"""A logistic (or Sech-squared) continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `logistic` is:

    .. math::

        f(x) = \frac{\exp(-x)}
                    {(1+\exp(-x))^2}

    `logistic` is a special case of `genlogistic` with ``c=1``.

    Remark that the survival function (``logistic.sf``) is equal to the
    Fermi-Dirac distribution describing fermionic statistics.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, size=None, random_state=None):
        return random_state.logistic(size=size)

    def _pdf(self, x):
        # logistic.pdf(x) = exp(-x) / (1+exp(-x))**2, via the log-density
        return np.exp(self._logpdf(x))

    def _logpdf(self, x):
        # written in terms of -|x| so the exponential never overflows
        y = -np.abs(x)
        return y - 2. * sc.log1p(np.exp(y))

    def _cdf(self, x):
        return sc.expit(x)

    def _logcdf(self, x):
        return sc.log_expit(x)

    def _ppf(self, q):
        return sc.logit(q)

    def _sf(self, x):
        return sc.expit(-x)

    def _logsf(self, x):
        return sc.log_expit(-x)

    def _isf(self, q):
        return -sc.logit(q)

    def _stats(self):
        return 0, np.pi*np.pi/3.0, 0, 6.0/5.0

    def _entropy(self):
        # https://en.wikipedia.org/wiki/Logistic_distribution
        return 2.0

    @_call_super_mom
    def fit(self, data, *args, **kwds):
        data, floc, fscale = _check_fit_input_parameters(self, data,
                                                         args, kwds)

        # With either parameter fixed, the 2x2 MLE system below does not
        # apply; defer to the generic numerical fit.
        if floc is not None or fscale is not None:
            return super().fit(data, *args, **kwds)

        # starting guesses from rv_continuous, overridable by the caller
        loc, scale = self._fitstart(data)
        loc = kwds.pop('loc', loc)
        scale = kwds.pop('scale', scale)

        # The maximum likelihood estimators `a` (location) and `b` (scale)
        # are roots of the two score equations below.
        # Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
        # and Peacock (2000), Page 130.
        def score_equations(params, data):
            a, b = params
            n = len(data)
            c = (data - a) / b
            x1 = np.sum(sc.expit(c)) - n/2
            x2 = np.sum(c*np.tanh(c/2)) - n
            return x1, x2

        return tuple(optimize.root(score_equations, (loc, scale),
                                   args=(data,)).x)


logistic = logistic_gen(name='logistic')
class loggamma_gen(rv_continuous):
    r"""A log gamma continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `loggamma` is:

    .. math::

        f(x, c) = \frac{\exp(c x - \exp(x))}
                       {\Gamma(c)}

    for all :math:`x, c > 0`. Here, :math:`\Gamma` is the
    gamma function (`scipy.special.gamma`).

    `loggamma` takes ``c`` as a shape parameter for :math:`c`.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, c, size=None, random_state=None):
        # the log of a Gamma(c) variate is loggamma(c)
        return np.log(random_state.gamma(c, size=size))

    def _pdf(self, x, c):
        # loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
        return np.exp(c*x-np.exp(x)-sc.gammaln(c))

    def _logpdf(self, x, c):
        return c*x - np.exp(x) - sc.gammaln(c)

    def _cdf(self, x, c):
        return sc.gammainc(c, np.exp(x))

    def _ppf(self, q, c):
        return np.log(sc.gammaincinv(c, q))

    def _sf(self, x, c):
        return sc.gammaincc(c, np.exp(x))

    def _isf(self, q, c):
        return np.log(sc.gammainccinv(c, q))

    def _stats(self, c):
        # See, for example, "A Statistical Study of Log-Gamma Distribution", by
        # Ping Shing Chan (thesis, McMaster University, 1993).
        mu = sc.digamma(c)
        sigma2 = sc.polygamma(1, c)
        skew = sc.polygamma(2, c) / np.power(sigma2, 1.5)
        excess_kurt = sc.polygamma(3, c) / (sigma2*sigma2)
        return mu, sigma2, skew, excess_kurt


loggamma = loggamma_gen(name='loggamma')
class loglaplace_gen(rv_continuous):
    r"""A log-Laplace continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `loglaplace` is:

    .. math::

        f(x, c) = \begin{cases}\frac{c}{2} x^{ c-1}  &\text{for } 0 < x < 1\\
                               \frac{c}{2} x^{-c-1}  &\text{for } x \ge 1
                  \end{cases}

    for :math:`c > 0`.

    `loglaplace` takes ``c`` as a shape parameter for :math:`c`.

    %(after_notes)s

    References
    ----------
    T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
    The Mathematical Scientist, vol. 28, pp. 49-60, 2003.

    %(example)s

    """
    def _pdf(self, x, c):
        # loglaplace.pdf(x, c) = c / 2 * x**(c-1),  for 0 < x < 1
        #                      = c / 2 * x**(-c-1), for x >= 1
        half_c = c/2.0
        # flip the exponent's sign on [1, inf)
        c = np.where(x < 1, c, -c)
        return half_c*x**(c-1)

    def _cdf(self, x, c):
        return np.where(x < 1, 0.5*x**c, 1-0.5*x**(-c))

    def _ppf(self, q, c):
        # invert each cdf branch; the median (q = 1/2) is x = 1
        return np.where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))

    def _munp(self, n, c):
        # n-th raw moment, finite for |n| < c
        return c**2 / (c**2 - n**2)

    def _entropy(self, c):
        return np.log(2.0/c) + 1.0


loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
def _lognorm_logpdf(x, s):
    # Log of the lognormal pdf with shape s; returns -inf at x == 0,
    # where log(x) diverges (the lazywhere guard avoids evaluating it).
    return _lazywhere(x != 0, (x, s),
                      lambda x, s: -np.log(x)**2 / (2*s**2) - np.log(s*x*np.sqrt(2*np.pi)),
                      -np.inf)
class lognorm_gen(rv_continuous):
    r"""A lognormal continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `lognorm` is:

    .. math::

        f(x, s) = \frac{1}{s x \sqrt{2\pi}}
                  \exp\left(-\frac{\log^2(x)}{2s^2}\right)

    for :math:`x > 0`, :math:`s > 0`.

    `lognorm` takes ``s`` as a shape parameter for :math:`s`.

    %(after_notes)s

    A common parametrization for a lognormal random variable ``Y`` is in
    terms of the mean, ``mu``, and standard deviation, ``sigma``, of the
    unique normally distributed random variable ``X`` such that exp(X) = Y.
    This parametrization corresponds to setting ``s = sigma`` and ``scale =
    exp(mu)``.

    %(example)s

    """
    _support_mask = rv_continuous._open_support_mask

    def _rvs(self, s, size=None, random_state=None):
        # exp of a scaled standard normal is lognormal
        return np.exp(s * random_state.standard_normal(size))

    def _pdf(self, x, s):
        # lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
        return np.exp(self._logpdf(x, s))

    def _logpdf(self, x, s):
        return _lognorm_logpdf(x, s)

    def _cdf(self, x, s):
        return _norm_cdf(np.log(x) / s)

    def _logcdf(self, x, s):
        return _norm_logcdf(np.log(x) / s)

    def _ppf(self, q, s):
        return np.exp(s * _norm_ppf(q))

    def _sf(self, x, s):
        return _norm_sf(np.log(x) / s)

    def _logsf(self, x, s):
        return _norm_logsf(np.log(x) / s)

    def _stats(self, s):
        p = np.exp(s*s)
        mu = np.sqrt(p)
        mu2 = p*(p-1)
        g1 = np.sqrt((p-1))*(2+p)
        # excess kurtosis p**4 + 2*p**3 + 3*p**2 - 6 via Horner evaluation
        g2 = np.polyval([1, 2, 3, 0, -6.0], p)
        return mu, mu2, g1, g2

    def _entropy(self, s):
        return 0.5 * (1 + np.log(2*np.pi) + 2 * np.log(s))

    @_call_super_mom
    @extend_notes_in_docstring(rv_continuous, notes="""\
        When `method='MLE'` and
        the location parameter is fixed by using the `floc` argument,
        this function uses explicit formulas for the maximum likelihood
        estimation of the log-normal shape and scale parameters, so the
        `optimizer`, `loc` and `scale` keyword arguments are ignored.
        \n\n""")
    def fit(self, data, *args, **kwds):
        floc = kwds.get('floc', None)
        if floc is None:
            # fall back on the default fit method.
            return super().fit(data, *args, **kwds)

        # any of these spellings fixes the shape parameter s
        f0 = (kwds.get('f0', None) or kwds.get('fs', None) or
              kwds.get('fix_s', None))
        fscale = kwds.get('fscale', None)

        if len(args) > 1:
            raise TypeError("Too many input arguments.")
        # consume every recognized keyword; anything left over is an error
        for name in ['f0', 'fs', 'fix_s', 'floc', 'fscale', 'loc', 'scale',
                     'optimizer', 'method']:
            kwds.pop(name, None)
        if kwds:
            raise TypeError("Unknown arguments: %s." % kwds)

        # Special case: loc is fixed.  Use the maximum likelihood formulas
        # instead of the numerical solver.

        if f0 is not None and fscale is not None:
            # This check is for consistency with `rv_continuous.fit`.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")

        data = np.asarray(data)

        if not np.isfinite(data).all():
            raise RuntimeError("The data contains non-finite values.")

        floc = float(floc)
        if floc != 0:
            # Shifting the data by floc. Don't do the subtraction in-place,
            # because `data` might be a view of the input array.
            data = data - floc
        if np.any(data <= 0):
            raise FitDataError("lognorm", lower=floc, upper=np.inf)
        lndata = np.log(data)

        # Three cases to handle:
        # * shape and scale both free
        # * shape fixed, scale free
        # * shape free, scale fixed

        if fscale is None:
            # scale is free.
            scale = np.exp(lndata.mean())
            if f0 is None:
                # shape is free.
                shape = lndata.std()
            else:
                # shape is fixed.
                shape = float(f0)
        else:
            # scale is fixed, shape is free
            scale = float(fscale)
            shape = np.sqrt(((lndata - np.log(scale))**2).mean())

        return shape, floc, scale


lognorm = lognorm_gen(a=0.0, name='lognorm')
class gilbrat_gen(rv_continuous):
    r"""A Gilbrat continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `gilbrat` is:

    .. math::

        f(x) = \frac{1}{x \sqrt{2\pi}} \exp(-\frac{1}{2} (\log(x))^2)

    `gilbrat` is a special case of `lognorm` with ``s=1``.

    %(after_notes)s

    %(example)s

    """
    # x = 0 is excluded from the support.
    _support_mask = rv_continuous._open_support_mask

    def _rvs(self, size=None, random_state=None):
        # exp of a standard normal draw.
        return np.exp(random_state.standard_normal(size))

    def _pdf(self, x):
        # gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
        # Evaluate through the log-density for numerical stability.
        return np.exp(self._logpdf(x))

    def _logpdf(self, x):
        # Delegate to the lognormal log-density with unit shape.
        return _lognorm_logpdf(x, 1.0)

    def _cdf(self, x):
        return _norm_cdf(np.log(x))

    def _ppf(self, q):
        return np.exp(_norm_ppf(q))

    def _stats(self):
        # Lognormal moment formulas evaluated at p = exp(s**2) with s = 1.
        e = np.e
        mean = np.sqrt(e)
        var = e*(e-1)
        skew = np.sqrt((e-1))*(2+e)
        kurt = np.polyval([1, 2, 3, 0, -6.0], e)
        return mean, var, skew, kurt

    def _entropy(self):
        return 0.5 * np.log(2 * np.pi) + 0.5


gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
class maxwell_gen(rv_continuous):
    r"""A Maxwell continuous random variable.

    %(before_notes)s

    Notes
    -----
    A special case of a `chi` distribution,  with ``df=3``, ``loc=0.0``,
    and given ``scale = a``, where ``a`` is the parameter used in the
    Mathworld description [1]_.

    The probability density function for `maxwell` is:

    .. math::

        f(x) = \sqrt{2/\pi}x^2 \exp(-x^2/2)

    for :math:`x >= 0`.

    %(after_notes)s

    References
    ----------
    .. [1] http://mathworld.wolfram.com/MaxwellDistribution.html

    %(example)s
    """
    def _rvs(self, size=None, random_state=None):
        # A Maxwell variate is a chi variate with 3 degrees of freedom.
        return chi.rvs(3.0, size=size, random_state=random_state)

    def _pdf(self, x):
        # maxwell.pdf(x) = sqrt(2/pi)x**2 * exp(-x**2/2)
        return _SQRT_2_OVER_PI*x*x*np.exp(-x*x/2.0)

    def _logpdf(self, x):
        # Allow x=0 without 'divide by zero' warnings
        with np.errstate(divide='ignore'):
            return _LOG_SQRT_2_OVER_PI + 2*np.log(x) - 0.5*x*x

    def _cdf(self, x):
        # Regularized lower incomplete gamma function at x**2/2.
        return sc.gammainc(1.5, x*x/2.0)

    def _ppf(self, q):
        return np.sqrt(2*sc.gammaincinv(1.5, q))

    def _stats(self):
        # Closed-form moments; d = 3*pi - 8 appears in skew and kurtosis.
        d = 3*np.pi-8
        mean = 2*np.sqrt(2.0/np.pi)
        var = 3-8/np.pi
        skew = np.sqrt(2)*(32-10*np.pi)/d**1.5
        kurt = (-12*np.pi*np.pi + 160*np.pi - 384) / d**2.0
        return mean, var, skew, kurt

    def _entropy(self):
        return _EULER + 0.5*np.log(2*np.pi)-0.5


maxwell = maxwell_gen(a=0.0, name='maxwell')
class mielke_gen(rv_continuous):
    r"""A Mielke Beta-Kappa / Dagum continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `mielke` is:

    .. math::

        f(x, k, s) = \frac{k x^{k-1}}{(1+x^s)^{1+k/s}}

    for :math:`x > 0` and :math:`k, s > 0`. The distribution is sometimes
    called Dagum distribution ([2]_). It was already defined in [3]_, called
    a Burr Type III distribution (`burr` with parameters ``c=s`` and
    ``d=k/s``).

    `mielke` takes ``k`` and ``s`` as shape parameters.

    %(after_notes)s

    References
    ----------
    .. [1] Mielke, P.W., 1973 "Another Family of Distributions for Describing
           and Analyzing Precipitation Data." J. Appl. Meteor., 12, 275-280
    .. [2] Dagum, C., 1977 "A new model for personal income distribution."
           Economie Appliquee, 33, 327-367.
    .. [3] Burr, I. W. "Cumulative frequency functions", Annals of
           Mathematical Statistics, 13(2), pp 215-232 (1942).

    %(example)s

    """
    def _argcheck(self, k, s):
        # Both shape parameters must be strictly positive.
        return (k > 0) & (s > 0)

    def _pdf(self, x, k, s):
        # mielke.pdf(x, k, s) = k*x**(k-1) / (1 + x**s)**(1 + k/s)
        return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)

    def _logpdf(self, x, k, s):
        # Allow x=0 without 'divide by zero' warnings.
        with np.errstate(divide='ignore'):
            return np.log(k) + np.log(x)*(k - 1) - np.log1p(x**s)*(1 + k/s)

    def _cdf(self, x, k, s):
        return x**k / (1.0+x**s)**(k*1.0/s)

    def _ppf(self, q, k, s):
        # Analytic inverse of the CDF.
        t = pow(q, s*1.0/k)
        return pow(t/(1.0-t), 1.0/s)

    def _munp(self, n, k, s):
        def _moment(n, k, s):
            # n-th moment is defined for -k < n < s
            return sc.gamma((k+n)/s)*sc.gamma(1-n/s)/sc.gamma(k/s)

        return _lazywhere(n < s, (n, k, s), _moment, np.inf)


mielke = mielke_gen(a=0.0, name='mielke')
class kappa4_gen(rv_continuous):
    r"""Kappa 4 parameter distribution.

    %(before_notes)s

    Notes
    -----
    The probability density function for kappa4 is:

    .. math::

        f(x, h, k) = (1 - k x)^{1/k - 1} (1 - h (1 - k x)^{1/k})^{1/h-1}

    if :math:`h` and :math:`k` are not equal to 0.

    If :math:`h` or :math:`k` are zero then the pdf can be simplified:

    h = 0 and k != 0::

        kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
                              exp(-(1.0 - k*x)**(1.0/k))

    h != 0 and k = 0::

        kappa4.pdf(x, h, k) = exp(-x)*(1.0 - h*exp(-x))**(1.0/h - 1.0)

    h = 0 and k = 0::

        kappa4.pdf(x, h, k) = exp(-x)*exp(-exp(-x))

    kappa4 takes :math:`h` and :math:`k` as shape parameters.

    The kappa4 distribution returns other distributions when certain
    :math:`h` and :math:`k` values are used.

    +------+-------------+----------------+------------------+
    | h    | k=0.0       | k=1.0          | -inf<=k<=inf     |
    +======+=============+================+==================+
    | -1.0 | Logistic    |                | Generalized      |
    |      |             |                | Logistic(1)      |
    |      |             |                |                  |
    |      | logistic(x) |                |                  |
    +------+-------------+----------------+------------------+
    | 0.0  | Gumbel      | Reverse        | Generalized      |
    |      |             | Exponential(2) | Extreme Value    |
    |      |             |                |                  |
    |      | gumbel_r(x) |                | genextreme(x, k) |
    +------+-------------+----------------+------------------+
    | 1.0  | Exponential | Uniform        | Generalized      |
    |      |             |                | Pareto           |
    |      |             |                |                  |
    |      | expon(x)    | uniform(x)     | genpareto(x, -k) |
    +------+-------------+----------------+------------------+

    (1) There are at least five generalized logistic distributions.
        Four are described here:
        https://en.wikipedia.org/wiki/Generalized_logistic_distribution
        The "fifth" one is the one kappa4 should match which currently
        isn't implemented in scipy:
        https://en.wikipedia.org/wiki/Talk:Generalized_logistic_distribution
        https://www.mathwave.com/help/easyfit/html/analyses/distributions/gen_logistic.html
    (2) This distribution is currently not in scipy.

    References
    ----------
    J.C. Finney, "Optimization of a Skewed Logistic Distribution With Respect
    to the Kolmogorov-Smirnov Test", A Dissertation Submitted to the Graduate
    Faculty of the Louisiana State University and Agricultural and Mechanical
    College, (August, 2004),
    https://digitalcommons.lsu.edu/gradschool_dissertations/3672

    J.R.M. Hosking, "The four-parameter kappa distribution". IBM J. Res.
    Develop. 38 (3), 25 1-258 (1994).

    B. Kumphon, A. Kaew-Man, P. Seenoi, "A Rainfall Distribution for the Lampao
    Site in the Chi River Basin, Thailand", Journal of Water Resource and
    Protection, vol. 4, 866-869, (2012).
    :doi:`10.4236/jwarp.2012.410101`

    C. Winchester, "On Estimation of the Four-Parameter Kappa Distribution", A
    Thesis Submitted to Dalhousie University, Halifax, Nova Scotia, (March
    2000).
    http://www.nlc-bnc.ca/obj/s4/f2/dsk2/ftp01/MQ57336.pdf

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, h, k):
        # Any real h and k are valid; return an all-True mask with the
        # broadcast shape of the two parameters.
        shape = np.broadcast_arrays(h, k)[0].shape
        return np.full(shape, fill_value=True)

    def _get_support(self, h, k):
        # The support endpoints depend on the signs of h and k; the six
        # regimes below are evaluated lazily per regime.
        condlist = [np.logical_and(h > 0, k > 0),
                    np.logical_and(h > 0, k == 0),
                    np.logical_and(h > 0, k < 0),
                    np.logical_and(h <= 0, k > 0),
                    np.logical_and(h <= 0, k == 0),
                    np.logical_and(h <= 0, k < 0)]

        # Lower endpoint in each regime.
        def f0(h, k):
            return (1.0 - np.float_power(h, -k))/k

        def f1(h, k):
            return np.log(h)

        def f3(h, k):
            # Unbounded below: fill with -inf.
            a = np.empty(np.shape(h))
            a[:] = -np.inf
            return a

        def f5(h, k):
            return 1.0/k

        _a = _lazyselect(condlist,
                         [f0, f1, f0, f3, f3, f5],
                         [h, k],
                         default=np.nan)

        # NOTE: f0 and f1 are deliberately redefined here for the upper
        # endpoint of the support.
        def f0(h, k):
            return 1.0/k

        def f1(h, k):
            # Unbounded above: fill with +inf.
            a = np.empty(np.shape(h))
            a[:] = np.inf
            return a

        _b = _lazyselect(condlist,
                         [f0, f1, f1, f0, f1, f1],
                         [h, k],
                         default=np.nan)
        return _a, _b

    def _pdf(self, x, h, k):
        # kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
        #                       (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1)
        # Evaluated through the log-density for numerical stability.
        return np.exp(self._logpdf(x, h, k))

    def _logpdf(self, x, h, k):
        # Four cases: both shapes nonzero, only one zero, or both zero
        # (each has a simplified closed form; see the class docstring).
        condlist = [np.logical_and(h != 0, k != 0),
                    np.logical_and(h == 0, k != 0),
                    np.logical_and(h != 0, k == 0),
                    np.logical_and(h == 0, k == 0)]

        def f0(x, h, k):
            '''pdf = (1.0 - k*x)**(1.0/k - 1.0)*(
                      1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1.0)
               logpdf = ...
            '''
            return (sc.xlog1py(1.0/k - 1.0, -k*x) +
                    sc.xlog1py(1.0/h - 1.0, -h*(1.0 - k*x)**(1.0/k)))

        def f1(x, h, k):
            '''pdf = (1.0 - k*x)**(1.0/k - 1.0)*np.exp(-(
                      1.0 - k*x)**(1.0/k))
               logpdf = ...
            '''
            return sc.xlog1py(1.0/k - 1.0, -k*x) - (1.0 - k*x)**(1.0/k)

        def f2(x, h, k):
            '''pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0)
               logpdf = ...
            '''
            return -x + sc.xlog1py(1.0/h - 1.0, -h*np.exp(-x))

        def f3(x, h, k):
            '''pdf = np.exp(-x-np.exp(-x))
               logpdf = ...
            '''
            return -x - np.exp(-x)

        return _lazyselect(condlist,
                           [f0, f1, f2, f3],
                           [x, h, k],
                           default=np.nan)

    def _cdf(self, x, h, k):
        return np.exp(self._logcdf(x, h, k))

    def _logcdf(self, x, h, k):
        # Same four-way case split as _logpdf.
        condlist = [np.logical_and(h != 0, k != 0),
                    np.logical_and(h == 0, k != 0),
                    np.logical_and(h != 0, k == 0),
                    np.logical_and(h == 0, k == 0)]

        def f0(x, h, k):
            '''cdf = (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h)
               logcdf = ...
            '''
            return (1.0/h)*sc.log1p(-h*(1.0 - k*x)**(1.0/k))

        def f1(x, h, k):
            '''cdf = np.exp(-(1.0 - k*x)**(1.0/k))
               logcdf = ...
            '''
            return -(1.0 - k*x)**(1.0/k)

        def f2(x, h, k):
            '''cdf = (1.0 - h*np.exp(-x))**(1.0/h)
               logcdf = ...
            '''
            return (1.0/h)*sc.log1p(-h*np.exp(-x))

        def f3(x, h, k):
            '''cdf = np.exp(-np.exp(-x))
               logcdf = ...
            '''
            return -np.exp(-x)

        return _lazyselect(condlist,
                           [f0, f1, f2, f3],
                           [x, h, k],
                           default=np.nan)

    def _ppf(self, q, h, k):
        # Analytic inverse of the CDF in each of the four regimes.
        condlist = [np.logical_and(h != 0, k != 0),
                    np.logical_and(h == 0, k != 0),
                    np.logical_and(h != 0, k == 0),
                    np.logical_and(h == 0, k == 0)]

        def f0(q, h, k):
            return 1.0/k*(1.0 - ((1.0 - (q**h))/h)**k)

        def f1(q, h, k):
            return 1.0/k*(1.0 - (-np.log(q))**k)

        def f2(q, h, k):
            '''ppf = -np.log((1.0 - (q**h))/h)
            '''
            return -sc.log1p(-(q**h)) + np.log(h)

        def f3(q, h, k):
            return -np.log(-np.log(q))

        return _lazyselect(condlist,
                           [f0, f1, f2, f3],
                           [q, h, k],
                           default=np.nan)

    def _get_stats_info(self, h, k):
        # Largest moment order that exists for the given shapes; the
        # default 5 means the first four moments all exist.
        condlist = [
            np.logical_and(h < 0, k >= 0),
            k < 0,
        ]

        def f0(h, k):
            return (-1.0/h*k).astype(int)

        def f1(h, k):
            return (-1.0/k).astype(int)

        return _lazyselect(condlist, [f0, f1], [h, k], default=5)

    def _stats(self, h, k):
        # Report nan for moment orders that do not exist; None entries are
        # filled in numerically by the generic machinery.
        maxr = self._get_stats_info(h, k)
        outputs = [None if np.any(r < maxr) else np.nan for r in range(1, 5)]
        return outputs[:]

    def _mom1_sc(self, m, *args):
        # Numerical moment of order m; nan when the moment does not exist.
        maxr = self._get_stats_info(args[0], args[1])
        if m >= maxr:
            return np.nan
        return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]


kappa4 = kappa4_gen(name='kappa4')
class kappa3_gen(rv_continuous):
    r"""Kappa 3 parameter distribution.

    %(before_notes)s

    Notes
    -----
    The probability density function for `kappa3` is:

    .. math::

        f(x, a) = a (a + x^a)^{-(a + 1)/a}

    for :math:`x > 0` and :math:`a > 0`.

    `kappa3` takes ``a`` as a shape parameter for :math:`a`.

    References
    ----------
    P.W. Mielke and E.S. Johnson, "Three-Parameter Kappa Distribution Maximum
    Likelihood and Likelihood Ratio Tests", Methods in Weather Research,
    701-707, (September, 1973),
    :doi:`10.1175/1520-0493(1973)101<0701:TKDMLE>2.3.CO;2`

    B. Kumphon, "Maximum Entropy and Maximum Likelihood Estimation for the
    Three-Parameter Kappa Distribution", Open Journal of Statistics, vol 2,
    415-419 (2012), :doi:`10.4236/ojs.2012.24050`

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, a):
        # The shape must be strictly positive.
        return a > 0

    def _pdf(self, x, a):
        # kappa3.pdf(x, a) = a*(a + x**a)**(-(a + 1)/a), for x > 0
        return a*(a + x**a)**(-1.0/a-1)

    def _cdf(self, x, a):
        return x*(a + x**a)**(-1.0/a)

    def _ppf(self, q, a):
        # Analytic inverse of the CDF.
        return (a/(q**-a - 1.0))**(1.0/a)

    def _stats(self, a):
        # The r-th moment exists only for r < a; report nan otherwise and
        # leave the finite ones (None) to the generic machinery.
        return [None if np.any(r < a) else np.nan for r in range(1, 5)]

    def _mom1_sc(self, m, *args):
        # Numerical moment of order m; nan when the moment does not exist.
        if np.any(m >= args[0]):
            return np.nan
        return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]


kappa3 = kappa3_gen(a=0.0, name='kappa3')
class moyal_gen(rv_continuous):
    r"""A Moyal continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `moyal` is:

    .. math::

        f(x) = \exp(-(x + \exp(-x))/2) / \sqrt{2\pi}

    for a real number :math:`x`.

    %(after_notes)s

    This distribution has utility in high-energy physics and radiation
    detection. It describes the energy loss of a charged relativistic
    particle due to ionization of the medium [1]_. It also provides an
    approximation for the Landau distribution. For an in depth description
    see [2]_. For additional description, see [3]_.

    References
    ----------
    .. [1] J.E. Moyal, "XXX. Theory of ionization fluctuations",
           The London, Edinburgh, and Dublin Philosophical Magazine
           and Journal of Science, vol 46, 263-280, (1955).
           :doi:`10.1080/14786440308521076` (gated)
    .. [2] G. Cordeiro et al., "The beta Moyal: a useful skew distribution",
           International Journal of Research and Reviews in Applied Sciences,
           vol 10, 171-192, (2012).
           http://www.arpapress.com/Volumes/Vol10Issue2/IJRRAS_10_2_02.pdf
    .. [3] C. Walck, "Handbook on Statistical Distributions for
           Experimentalists; International Report SUF-PFY/96-01", Chapter 26,
           University of Stockholm: Stockholm, Sweden, (2007).
           http://www.stat.rice.edu/~dobelman/textfiles/DistributionsHandbook.pdf

    .. versionadded:: 1.1.0

    %(example)s

    """
    def _rvs(self, size=None, random_state=None):
        # -log of a gamma(1/2, scale=2) draw is Moyal distributed.
        g = gamma.rvs(a=0.5, scale=2, size=size,
                      random_state=random_state)
        return -np.log(g)

    def _pdf(self, x):
        return np.exp(-0.5 * (x + np.exp(-x))) / np.sqrt(2*np.pi)

    def _cdf(self, x):
        return sc.erfc(np.exp(-0.5 * x) / np.sqrt(2))

    def _sf(self, x):
        # Complement of the CDF via erf.
        return sc.erf(np.exp(-0.5 * x) / np.sqrt(2))

    def _ppf(self, x):
        return -np.log(2 * sc.erfcinv(x)**2)

    def _stats(self):
        mean = np.log(2) + np.euler_gamma
        var = np.pi**2 / 2
        skew = 28 * np.sqrt(2) * sc.zeta(3) / np.pi**3
        kurt = 4.
        return mean, var, skew, kurt

    def _munp(self, n):
        # Closed forms for the first four raw moments; c is the mean.
        c = np.log(2) + np.euler_gamma
        if n == 1.0:
            return c
        elif n == 2.0:
            return np.pi**2 / 2 + c**2
        elif n == 3.0:
            t1 = 1.5 * np.pi**2 * c
            t2 = c**3
            t3 = 14 * sc.zeta(3)
            return t1 + t2 + t3
        elif n == 4.0:
            t1 = 4 * 14 * sc.zeta(3) * c
            t2 = 3 * np.pi**2 * c**2
            t3 = c**4
            t4 = 7 * np.pi**4 / 4
            return t1 + t2 + t3 + t4
        else:
            # generic numerical fallback for higher moments
            return self._mom1_sc(n)


moyal = moyal_gen(name="moyal")
class nakagami_gen(rv_continuous):
    r"""A Nakagami continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `nakagami` is:

    .. math::

        f(x, \nu) = \frac{2 \nu^\nu}{\Gamma(\nu)} x^{2\nu-1} \exp(-\nu x^2)

    for :math:`x >= 0`, :math:`\nu > 0`. The distribution was introduced in
    [2]_, see also [1]_ for further information.

    `nakagami` takes ``nu`` as a shape parameter for :math:`\nu`.

    %(after_notes)s

    References
    ----------
    .. [1] "Nakagami distribution", Wikipedia
           https://en.wikipedia.org/wiki/Nakagami_distribution
    .. [2] M. Nakagami, "The m-distribution - A general formula of intensity
           distribution of rapid fading", Statistical methods in radio wave
           propagation, Pergamon Press, 1960, 3-36.
           :doi:`10.1016/B978-0-08-009306-2.50005-4`

    %(example)s

    """
    def _pdf(self, x, nu):
        # Evaluate through the log-density for numerical stability.
        return np.exp(self._logpdf(x, nu))

    def _logpdf(self, x, nu):
        # nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
        #                       x**(2*nu-1) * exp(-nu*x**2)
        # xlogy handles the x = 0 boundary without warnings.
        return (np.log(2) + sc.xlogy(nu, nu) - sc.gammaln(nu) +
                sc.xlogy(2*nu - 1, x) - nu*x**2)

    def _cdf(self, x, nu):
        # Regularized lower incomplete gamma function of nu*x**2.
        return sc.gammainc(nu, nu*x*x)

    def _ppf(self, q, nu):
        return np.sqrt(1.0/nu*sc.gammaincinv(nu, q))

    def _sf(self, x, nu):
        return sc.gammaincc(nu, nu*x*x)

    def _isf(self, p, nu):
        return np.sqrt(1/nu * sc.gammainccinv(nu, p))

    def _stats(self, nu):
        # Closed-form moments in terms of gamma-function ratios.
        mu = sc.gamma(nu+0.5)/sc.gamma(nu)/np.sqrt(nu)
        mu2 = 1.0-mu*mu
        g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
        g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
        g2 /= nu*mu2**2.0
        return mu, mu2, g1, g2

    def _rvs(self, nu, size=None, random_state=None):
        # this relationship can be found in [1] or by a direct calculation
        return np.sqrt(random_state.standard_gamma(nu, size=size) / nu)

    def _fitstart(self, data, args=None):
        # Starting point for the numerical fit.
        if args is None:
            args = (1.0,) * self.numargs
        # Analytically justified estimates
        # see: https://docs.scipy.org/doc/scipy/reference/tutorial/stats/continuous_nakagami.html
        loc = np.min(data)
        scale = np.sqrt(np.sum((data - loc)**2) / len(data))
        return args + (loc, scale)


nakagami = nakagami_gen(a=0.0, name="nakagami")
class ncx2_gen(rv_continuous):
    r"""A non-central chi-squared continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `ncx2` is:

    .. math::

        f(x, k, \lambda) = \frac{1}{2} \exp(-(\lambda+x)/2)
            (x/\lambda)^{(k-2)/4}  I_{(k-2)/2}(\sqrt{\lambda x})

    for :math:`x >= 0` and :math:`k, \lambda > 0`. :math:`k` specifies the
    degrees of freedom (denoted ``df`` in the implementation) and
    :math:`\lambda` is the non-centrality parameter (denoted ``nc`` in the
    implementation). :math:`I_\nu` denotes the modified Bessel function of
    first order of degree :math:`\nu` (`scipy.special.iv`).

    `ncx2` takes ``df`` and ``nc`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    @staticmethod
    def _noncentral_mask(arr, nc):
        # True (broadcast to arr's shape) where the noncentrality parameter
        # is nonzero; where it is zero the distribution reduces to an
        # ordinary chi-square and the chi2 implementation is used instead.
        return np.ones_like(arr, dtype=bool) & (nc != 0)

    def _argcheck(self, df, nc):
        return (df > 0) & (nc >= 0)

    def _rvs(self, df, nc, size=None, random_state=None):
        return random_state.noncentral_chisquare(df, nc, size)

    def _logpdf(self, x, df, nc):
        cond = self._noncentral_mask(x, nc)
        return _lazywhere(cond, (x, df, nc), f=_ncx2_log_pdf, f2=chi2.logpdf)

    def _pdf(self, x, df, nc):
        # ncx2.pdf(x, df, nc) = exp(-(nc+x)/2) * 1/2 * (x/nc)**((df-2)/4)
        #                       * I[(df-2)/2](sqrt(nc*x))
        cond = self._noncentral_mask(x, nc)
        return _lazywhere(cond, (x, df, nc), f=_ncx2_pdf, f2=chi2.pdf)

    def _cdf(self, x, df, nc):
        cond = self._noncentral_mask(x, nc)
        return _lazywhere(cond, (x, df, nc), f=_ncx2_cdf, f2=chi2.cdf)

    def _ppf(self, q, df, nc):
        cond = self._noncentral_mask(q, nc)
        return _lazywhere(cond, (q, df, nc), f=sc.chndtrix, f2=chi2.ppf)

    def _stats(self, df, nc):
        # Closed-form mean/variance/skew/kurtosis.
        val = df + 2.0*nc
        return (df + nc,
                2*val,
                np.sqrt(8)*(val+nc)/val**1.5,
                12.0*(val+2*nc)/val**2.0)


ncx2 = ncx2_gen(a=0.0, name='ncx2')
class ncf_gen(rv_continuous):
    r"""A non-central F distribution continuous random variable.

    %(before_notes)s

    See Also
    --------
    scipy.stats.f : Fisher distribution

    Notes
    -----
    The probability density function for `ncf` is:

    .. math::

        f(x, n_1, n_2, \lambda) =
            \exp\left(\frac{\lambda}{2} +
                      \lambda n_1 \frac{x}{2(n_1 x + n_2)}
                \right)
            n_1^{n_1/2} n_2^{n_2/2} x^{n_1/2 - 1} \\
            (n_2 + n_1 x)^{-(n_1 + n_2)/2}
            \gamma(n_1/2) \gamma(1 + n_2/2) \\
            \frac{L^{\frac{n_1}{2}-1}_{n_2/2}
                \left(-\lambda n_1 \frac{x}{2(n_1 x + n_2)}\right)}
            {B(n_1/2, n_2/2)
                \gamma\left(\frac{n_1 + n_2}{2}\right)}

    for :math:`n_1, n_2 > 0`, :math:`\lambda \ge 0`.  Here :math:`n_1` is the
    degrees of freedom in the numerator, :math:`n_2` the degrees of freedom in
    the denominator, :math:`\lambda` the non-centrality parameter,
    :math:`\gamma` is the logarithm of the Gamma function, :math:`L_n^k` is a
    generalized Laguerre polynomial and :math:`B` is the beta function.

    `ncf` takes ``df1``, ``df2`` and ``nc`` as shape parameters. If ``nc=0``,
    the distribution becomes equivalent to the Fisher distribution.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, df1, df2, nc):
        return (df1 > 0) & (df2 > 0) & (nc >= 0)

    def _rvs(self, dfn, dfd, nc, size=None, random_state=None):
        return random_state.noncentral_f(dfn, dfd, nc, size)

    def _pdf_skip(self, x, dfn, dfd, nc):
        # NOTE: deliberately named _pdf_skip (not _pdf) so it is NOT picked
        # up by the rv_continuous machinery; the generic pdf is used instead.
        # ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2))) *
        #             df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) *
        #             (df2+df1*x)**(-(df1+df2)/2) *
        #             gamma(df1/2)*gamma(1+df2/2) *
        #             L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2))) /
        #             (B(v1/2, v2/2) * gamma((v1+v2)/2))
        n1, n2 = dfn, dfd
        term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + sc.gammaln(n1/2.)+sc.gammaln(1+n2/2.)
        term -= sc.gammaln((n1+n2)/2.0)
        Px = np.exp(term)
        Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
        Px *= (n2+n1*x)**(-(n1+n2)/2)
        Px *= sc.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)), n2/2, n1/2-1)
        Px /= sc.beta(n1/2, n2/2)
        # This function does not have a return. Drop it for now, the generic
        # function seems to work OK.

    def _cdf(self, x, dfn, dfd, nc):
        return sc.ncfdtr(dfn, dfd, nc, x)

    def _ppf(self, q, dfn, dfd, nc):
        return sc.ncfdtri(dfn, dfd, nc, q)

    def _munp(self, n, dfn, dfd, nc):
        # Closed-form raw moments in terms of the confluent hypergeometric
        # function hyp1f1.
        val = (dfn * 1.0/dfd)**n
        term = sc.gammaln(n+0.5*dfn) + sc.gammaln(0.5*dfd-n) - sc.gammaln(dfd*0.5)
        val *= np.exp(-nc / 2.0+term)
        val *= sc.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
        return val

    def _stats(self, dfn, dfd, nc):
        # Note: the rv_continuous class ensures that dfn > 0 when this function
        # is called, so we don't have to check for division by zero with dfn
        # in the following.
        # The mean is finite only for dfd > 2; entries where dfd <= 2 are
        # left at the inf fill value by the `where` mask of true_divide.
        mu_num = dfd * (dfn + nc)
        mu_den = dfn * (dfd - 2)
        mu = np.full_like(mu_num, dtype=np.float64, fill_value=np.inf)
        np.true_divide(mu_num, mu_den, where=dfd > 2, out=mu)

        # The variance is finite only for dfd > 4; same masking trick.
        mu2_num = 2*((dfn + nc)**2 + (dfn + 2*nc)*(dfd - 2))*(dfd/dfn)**2
        mu2_den = (dfd - 2)**2 * (dfd - 4)
        mu2 = np.full_like(mu2_num, dtype=np.float64, fill_value=np.inf)
        np.true_divide(mu2_num, mu2_den, where=dfd > 4, out=mu2)

        # Skew and kurtosis are left to the generic machinery.
        return mu, mu2, None, None


ncf = ncf_gen(a=0.0, name='ncf')
class t_gen(rv_continuous):
    r"""A Student's t continuous random variable.

    For the noncentral t distribution, see `nct`.

    %(before_notes)s

    See Also
    --------
    nct

    Notes
    -----
    The probability density function for `t` is:

    .. math::

        f(x, \nu) = \frac{\Gamma((\nu+1)/2)}
                        {\sqrt{\pi \nu} \Gamma(\nu/2)}
                    (1+x^2/\nu)^{-(\nu+1)/2}

    where :math:`x` is a real number and the degrees of freedom parameter
    :math:`\nu` (denoted ``df`` in the implementation) satisfies
    :math:`\nu > 0`. :math:`\Gamma` is the gamma function
    (`scipy.special.gamma`).

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, df):
        # df may be any positive value, including inf (normal limit).
        return df > 0

    def _rvs(self, df, size=None, random_state=None):
        return random_state.standard_t(df, size=size)

    def _pdf(self, x, df):
        # df == inf is dispatched to the standard normal density, to which
        # the t distribution converges.
        return _lazywhere(
            df == np.inf, (x, df),
            f=lambda x, df: norm._pdf(x),
            f2=lambda x, df: (
                np.exp(sc.gammaln((df+1)/2)-sc.gammaln(df/2))
                / (np.sqrt(df*np.pi)*(1+(x**2)/df)**((df+1)/2))
            )
        )

    def _logpdf(self, x, df):
        # Same inf-df dispatch as _pdf, in log space.
        return _lazywhere(
            df == np.inf, (x, df),
            f=lambda x, df: norm._logpdf(x),
            f2=lambda x, df: (
                sc.gammaln((df+1)/2) - sc.gammaln(df/2)
                - (0.5*np.log(df*np.pi)
                   + (df+1)/2*np.log(1+(x**2)/df))
            )
        )

    def _cdf(self, x, df):
        return sc.stdtr(df, x)

    def _sf(self, x, df):
        # By symmetry: sf(x) = cdf(-x).
        return sc.stdtr(df, -x)

    def _ppf(self, q, df):
        return sc.stdtrit(df, q)

    def _isf(self, q, df):
        return -sc.stdtrit(df, q)

    def _stats(self, df):
        # infinite df -> normal distribution (0.0, 1.0, 0.0, 0.0)
        infinite_df = np.isposinf(df)

        # Mean is 0 for df > 1 and undefined (reported as inf) otherwise.
        mu = np.where(df > 1, 0.0, np.inf)

        # Variance: inf for 1 < df <= 2, df/(df-2) for finite df > 2, and
        # 1 in the normal (inf df) limit; nan where undefined.
        condlist = ((df > 1) & (df <= 2),
                    (df > 2) & np.isfinite(df),
                    infinite_df)
        choicelist = (lambda df: np.broadcast_to(np.inf, df.shape),
                      lambda df: df / (df-2.0),
                      lambda df: np.broadcast_to(1, df.shape))
        mu2 = _lazyselect(condlist, choicelist, (df,), np.nan)

        # Skewness is 0 when it exists (df > 3).
        g1 = np.where(df > 3, 0.0, np.nan)

        # Excess kurtosis: inf for 2 < df <= 4, 6/(df-4) for finite df > 4,
        # 0 in the normal limit; nan where undefined.
        condlist = ((df > 2) & (df <= 4),
                    (df > 4) & np.isfinite(df),
                    infinite_df)
        choicelist = (lambda df: np.broadcast_to(np.inf, df.shape),
                      lambda df: 6.0 / (df-4.0),
                      lambda df: np.broadcast_to(0, df.shape))
        g2 = _lazyselect(condlist, choicelist, (df,), np.nan)

        return mu, mu2, g1, g2

    def _entropy(self, df):
        if df == np.inf:
            # Normal limit.
            return norm._entropy()

        half = df/2
        half1 = (df + 1)/2
        return (half1*(sc.digamma(half1) - sc.digamma(half))
                + np.log(np.sqrt(df)*sc.beta(half, 0.5)))


t = t_gen(name='t')
class nct_gen(rv_continuous):
    r"""A non-central Student's t continuous random variable.

    %(before_notes)s

    Notes
    -----
    If :math:`Y` is a standard normal random variable and :math:`V` is
    an independent chi-square random variable (`chi2`) with :math:`k` degrees
    of freedom, then

    .. math::

        X = \frac{Y + c}{\sqrt{V/k}}

    has a non-central Student's t distribution on the real line.
    The degrees of freedom parameter :math:`k` (denoted ``df`` in the
    implementation) satisfies :math:`k > 0` and the noncentrality parameter
    :math:`c` (denoted ``nc`` in the implementation) is a real number.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, df, nc):
        # (nc == nc) rejects nan noncentrality values.
        return (df > 0) & (nc == nc)

    def _rvs(self, df, nc, size=None, random_state=None):
        # Sample directly from the defining ratio (Y + nc) / sqrt(V/df).
        n = norm.rvs(loc=nc, size=size, random_state=random_state)
        c2 = chi2.rvs(df, size=size, random_state=random_state)
        return n * np.sqrt(df) / np.sqrt(c2)

    def _pdf(self, x, df, nc):
        # Closed-form density in terms of confluent hypergeometric
        # functions (sc.hyp1f1); the common prefactor is evaluated in log
        # space to limit overflow.
        n = df*1.0
        nc = nc*1.0
        x2 = x*x
        ncx2 = nc*nc*x2
        fac1 = n + x2
        trm1 = (n/2.*np.log(n) + sc.gammaln(n+1)
                - (n*np.log(2) + nc*nc/2 + (n/2)*np.log(fac1)
                   + sc.gammaln(n/2)))
        Px = np.exp(trm1)
        valF = ncx2 / (2*fac1)
        trm1 = (np.sqrt(2)*nc*x*sc.hyp1f1(n/2+1, 1.5, valF)
                / np.asarray(fac1*sc.gamma((n+1)/2)))
        trm2 = (sc.hyp1f1((n+1)/2, 0.5, valF)
                / np.asarray(np.sqrt(fac1)*sc.gamma(n/2+1)))
        Px *= trm1+trm2
        return Px

    def _cdf(self, x, df, nc):
        return sc.nctdtr(df, nc, x)

    def _ppf(self, q, df, nc):
        return sc.nctdtrit(df, nc, q)

    def _stats(self, df, nc, moments='mv'):
        #
        # See D. Hogben, R.S. Pinkham, and M.B. Wilk,
        # 'The moments of the non-central t-distribution'
        # Biometrika 48, p. 465 (1961).
        # e.g. https://www.jstor.org/stable/2332772 (gated)
        #
        mu, mu2, g1, g2 = None, None, None, None

        # Mean requires df > 1, variance df > 2; nan otherwise.
        gfac = np.exp(sc.betaln(df/2-0.5, 0.5) - sc.gammaln(0.5))
        c11 = np.sqrt(df/2.) * gfac
        c20 = np.where(df > 2., df / (df-2.), np.nan)
        c22 = c20 - c11*c11
        mu = np.where(df > 1, nc*c11, np.nan)
        mu2 = np.where(df > 2, c22*nc*nc + c20, np.nan)
        if 's' in moments:
            # Skewness requires df > 3.
            c33t = df * (7.-2.*df) / (df-2.) / (df-3.) + 2.*c11*c11
            c31t = 3.*df / (df-2.) / (df-3.)
            mu3 = (c33t*nc*nc + c31t) * c11*nc
            g1 = np.where(df > 3, mu3 / np.power(mu2, 1.5), np.nan)
        # kurtosis
        if 'k' in moments:
            # Excess kurtosis requires df > 4.
            c44 = df*df / (df-2.) / (df-4.)
            c44 -= c11*c11 * 2.*df*(5.-df) / (df-2.) / (df-3.)
            c44 -= 3.*c11**4
            c42 = df / (df-4.) - c11*c11 * (df-1.) / (df-3.)
            c42 *= 6.*df / (df-2.)
            c40 = 3.*df*df / (df-2.) / (df-4.)
            mu4 = c44 * nc**4 + c42*nc**2 + c40
            g2 = np.where(df > 4, mu4/mu2**2 - 3., np.nan)
        return mu, mu2, g1, g2


nct = nct_gen(name="nct")
class pareto_gen(rv_continuous):
    r"""A Pareto continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `pareto` is:

    .. math::

        f(x, b) = \frac{b}{x^{b+1}}

    for :math:`x \ge 1`, :math:`b > 0`.

    `pareto` takes ``b`` as a shape parameter for :math:`b`.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x, b):
        # pareto.pdf(x, b) = b / x**(b+1)
        return b * x**(-b-1)

    def _cdf(self, x, b):
        return 1 - x**(-b)

    def _ppf(self, q, b):
        return pow(1-q, -1.0/b)

    def _sf(self, x, b):
        return x**(-b)

    def _stats(self, b, moments='mv'):
        # The r-th moment is finite only for b > r: np.extract/np.place
        # fill the valid entries, the rest keep the inf (mean/variance)
        # or nan (skew/kurtosis) fill value.
        mu, mu2, g1, g2 = None, None, None, None
        if 'm' in moments:
            mask = b > 1
            bt = np.extract(mask, b)
            mu = np.full(np.shape(b), fill_value=np.inf)
            np.place(mu, mask, bt / (bt-1.0))
        if 'v' in moments:
            mask = b > 2
            bt = np.extract(mask, b)
            mu2 = np.full(np.shape(b), fill_value=np.inf)
            np.place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
        if 's' in moments:
            mask = b > 3
            bt = np.extract(mask, b)
            g1 = np.full(np.shape(b), fill_value=np.nan)
            vals = 2 * (bt + 1.0) * np.sqrt(bt - 2.0) / ((bt - 3.0) * np.sqrt(bt))
            np.place(g1, mask, vals)
        if 'k' in moments:
            mask = b > 4
            bt = np.extract(mask, b)
            g2 = np.full(np.shape(b), fill_value=np.nan)
            vals = (6.0*np.polyval([1.0, 1.0, -6, -2], bt) /
                    np.polyval([1.0, -7.0, 12.0, 0.0], bt))
            np.place(g2, mask, vals)
        return mu, mu2, g1, g2

    def _entropy(self, c):
        # NOTE: the parameter here is the shape (called ``b`` elsewhere).
        return 1 + 1.0/c - np.log(c)

    @_call_super_mom
    def fit(self, data, *args, **kwds):
        parameters = _check_fit_input_parameters(self, data, args, kwds)
        data, fshape, floc, fscale = parameters
        if floc is None:
            # loc is free: no closed form, use the generic numerical fit.
            return super().fit(data, **kwds)
        # All shifted data must lie at or above the (possibly fixed) scale.
        if np.any(data - floc < (fscale if fscale else 0)):
            raise FitDataError("pareto", lower=1, upper=np.inf)
        data = data - floc

        # Source: Evans, Hastings, and Peacock (2000), Statistical
        # Distributions, 3rd. Ed., John Wiley and Sons. Page 149.

        if fscale is None:
            # MLE scale is the sample minimum of the shifted data.
            fscale = np.min(data)
        if fshape is None:
            # MLE shape is the reciprocal mean of log(data/scale).
            fshape = 1/((1/len(data)) * np.sum(np.log(data/fscale)))

        return fshape, floc, fscale


pareto = pareto_gen(a=1.0, name="pareto")
class lomax_gen(rv_continuous):
    r"""A Lomax (Pareto of the second kind) continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `lomax` is:

    .. math::

        f(x, c) = \frac{c}{(1+x)^{c+1}}

    for :math:`x \ge 0`, :math:`c > 0`.

    `lomax` takes ``c`` as a shape parameter for :math:`c`.

    `lomax` is a special case of `pareto` with ``loc=-1.0``.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x, c):
        # lomax.pdf(x, c) = c / (1+x)**(c+1)
        return c*1.0/(1.0+x)**(c+1.0)

    def _logpdf(self, x, c):
        return np.log(c) - (c+1)*sc.log1p(x)

    def _cdf(self, x, c):
        # 1 - (1+x)**(-c), via expm1/log1p for accuracy near x = 0.
        return -sc.expm1(-c*sc.log1p(x))

    def _sf(self, x, c):
        return np.exp(-c*sc.log1p(x))

    def _logsf(self, x, c):
        return -c*sc.log1p(x)

    def _ppf(self, q, c):
        return sc.expm1(-sc.log1p(-q)/c)

    def _stats(self, c):
        # Lomax is a Pareto shifted by loc=-1; reuse its moments.
        return pareto.stats(c, loc=-1.0, moments='mvsk')

    def _entropy(self, c):
        return 1+1.0/c-np.log(c)


lomax = lomax_gen(a=0.0, name="lomax")
class pearson3_gen(rv_continuous):
    r"""A pearson type III continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `pearson3` is:

    .. math::

        f(x, \kappa) = \frac{|\beta|}{\Gamma(\alpha)}
                       (\beta (x - \zeta))^{\alpha - 1}
                       \exp(-\beta (x - \zeta))

    where:

    .. math::

            \beta = \frac{2}{\kappa}

            \alpha = \beta^2 = \frac{4}{\kappa^2}

            \zeta = -\frac{\alpha}{\beta} = -\beta

    :math:`\Gamma` is the gamma function (`scipy.special.gamma`).
    Pass the skew :math:`\kappa` into `pearson3` as the shape parameter
    ``skew``.

    %(after_notes)s

    %(example)s

    References
    ----------
    R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
    Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
    Resources Research, Vol.27, 3149-3158 (1991).

    L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
    Vol.1, 191-198 (1930).

    "Using Modern Computing Tools to Fit the Pearson Type III Distribution to
    Aviation Loads Data", Office of Aviation Research (2003).

    """

    def _preprocess(self, x, skew):
        # The real 'loc' and 'scale' are handled in the calling pdf(...). The
        # local variables 'loc' and 'scale' within pearson3._pdf are set to
        # the defaults just to keep them as part of the equations for
        # documentation.
        loc = 0.0
        scale = 1.0

        # If skew is small, return _norm_pdf. The divide between pearson3
        # and norm was found by brute force and is approximately a skew of
        # 0.000016.  No one, I hope, would actually use a skew value even
        # close to this small.
        norm2pearson_transition = 0.000016

        ans, x, skew = np.broadcast_arrays(1.0, x, skew)
        ans = ans.copy()

        # mask is True where skew is small enough to use the normal approx.
        mask = np.absolute(skew) < norm2pearson_transition
        invmask = ~mask

        beta = 2.0 / (skew[invmask] * scale)
        alpha = (scale * beta)**2
        zeta = loc - alpha / beta
        transx = beta * (x[invmask] - zeta)
        return ans, x, transx, mask, invmask, beta, alpha, zeta

    def _argcheck(self, skew):
        # The _argcheck function in rv_continuous only allows positive
        # arguments.  The skew argument for pearson3 can be zero (which I
        # want to handle inside pearson3._pdf) or negative.  So just return
        # True for all skew args.
        return np.ones(np.shape(skew), dtype=bool)

    def _stats(self, skew):
        m = 0.0
        v = 1.0
        s = skew
        k = 1.5*skew**2
        return m, v, s, k

    def _pdf(self, x, skew):
        # pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
        #     (beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
        # Do the calculation in _logpdf since it helps to limit
        # overflow/underflow problems.
        ans = np.exp(self._logpdf(x, skew))
        if ans.ndim == 0:
            if np.isnan(ans):
                return 0.0
            return ans
        ans[np.isnan(ans)] = 0.0
        return ans

    def _logpdf(self, x, skew):
        # Correspondence with the gamma log-pdf (comment repaired; the
        # original had garbled parentheses around gammaln):
        #   PEARSON3 logpdf                       GAMMA logpdf
        #   np.log(abs(beta))
        # + (alpha - 1)*np.log(beta*(x - zeta))   + (a - 1)*np.log(x)
        # - beta*(x - zeta)                       - x
        # - sc.gammaln(alpha)                     - sc.gammaln(a)
        ans, x, transx, mask, invmask, beta, alpha, _ = (
            self._preprocess(x, skew))

        ans[mask] = np.log(_norm_pdf(x[mask]))
        # use logpdf instead of _logpdf to fix issue mentioned in gh-12640
        # (_logpdf does not return correct result for alpha = 1)
        ans[invmask] = np.log(abs(beta)) + gamma.logpdf(transx, alpha)
        return ans

    def _cdf(self, x, skew):
        ans, x, transx, mask, invmask, _, alpha, _ = (
            self._preprocess(x, skew))

        ans[mask] = _norm_cdf(x[mask])

        skew = np.broadcast_to(skew, invmask.shape)
        invmask1a = np.logical_and(invmask, skew > 0)
        invmask1b = skew[invmask] > 0
        # use cdf instead of _cdf to fix issue mentioned in gh-12640
        # (_cdf produces NaNs for inputs outside support)
        ans[invmask1a] = gamma.cdf(transx[invmask1b], alpha[invmask1b])

        # The gamma._cdf approach wasn't working with negative skew.
        # Note that multiplying the skew by -1 reflects about x=0.
        # So instead of evaluating the CDF with negative skew at x,
        # evaluate the SF with positive skew at -x.
        invmask2a = np.logical_and(invmask, skew < 0)
        invmask2b = skew[invmask] < 0
        # gamma._sf produces NaNs when transx < 0, so use gamma.sf
        ans[invmask2a] = gamma.sf(transx[invmask2b], alpha[invmask2b])

        return ans

    def _rvs(self, skew, size=None, random_state=None):
        skew = np.broadcast_to(skew, size)
        ans, _, _, mask, invmask, beta, alpha, zeta = (
            self._preprocess([0], skew))

        nsmall = mask.sum()
        nbig = mask.size - nsmall
        ans[mask] = random_state.standard_normal(nsmall)
        ans[invmask] = random_state.standard_gamma(alpha, nbig)/beta + zeta

        if size == ():
            ans = ans[0]
        return ans

    def _ppf(self, q, skew):
        ans, q, _, mask, invmask, beta, alpha, zeta = (
            self._preprocess(q, skew))
        ans[mask] = _norm_ppf(q[mask])
        ans[invmask] = sc.gammaincinv(alpha, q[invmask])/beta + zeta
        return ans

    @_call_super_mom
    @extend_notes_in_docstring(rv_continuous, notes="""\
        Note that method of moments (`method='MM'`) is not
        available for this distribution.\n\n""")
    def fit(self, data, *args, **kwds):
        if kwds.get("method", None) == 'MM':
            raise NotImplementedError("Fit `method='MM'` is not available for "
                                      "the Pearson3 distribution. Please try "
                                      "the default `method='MLE'`.")
        else:
            # BUG FIX: use zero-argument super() rather than
            # super(type(self), self).  The latter resolves `type(self)` at
            # runtime, so a subclass calling fit() would recurse infinitely.
            return super().fit(data, *args, **kwds)
pearson3 = pearson3_gen(name="pearson3")
class powerlaw_gen(rv_continuous):
    r"""A power-function continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `powerlaw` is:

    .. math::

        f(x, a) = a x^{a-1}

    for :math:`0 \le x \le 1`, :math:`a > 0`.

    `powerlaw` takes ``a`` as a shape parameter for :math:`a`.

    %(after_notes)s

    `powerlaw` is a special case of `beta` with ``b=1``.

    %(example)s

    """

    def _pdf(self, x, a):
        # pdf(x, a) = a * x**(a - 1)
        return a * x**(a - 1.0)

    def _logpdf(self, x, a):
        # xlogy handles the x == 0 endpoint gracefully.
        return np.log(a) + sc.xlogy(a - 1, x)

    def _cdf(self, x, a):
        # cdf(x, a) = x**a
        return x**(a * 1.0)

    def _logcdf(self, x, a):
        return a * np.log(x)

    def _ppf(self, q, a):
        # Invert q = x**a  ->  x = q**(1/a).
        return q ** (1.0 / a)

    def _stats(self, a):
        mean = a / (a + 1.0)
        var = a / (a + 2.0) / (a + 1.0) ** 2
        skew = -2.0 * ((a - 1.0) / (a + 3.0)) * np.sqrt((a + 2.0) / a)
        kurt = 6 * np.polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4))
        return mean, var, skew, kurt

    def _entropy(self, a):
        return 1 - 1.0/a - np.log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
class powerlognorm_gen(rv_continuous):
    r"""A power log-normal continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `powerlognorm` is:

    .. math::

        f(x, c, s) = \frac{c}{x s} \phi(\log(x)/s)
                     (\Phi(-\log(x)/s))^{c-1}

    where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
    and :math:`x > 0`, :math:`s, c > 0`.

    `powerlognorm` takes :math:`c` and :math:`s` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    # pdf involves log(x): keep the support open at the left endpoint.
    _support_mask = rv_continuous._open_support_mask

    def _pdf(self, x, c, s):
        # pdf(x, c, s) = c / (x*s) * phi(log(x)/s) * (Phi(-log(x)/s))**(c-1)
        z = np.log(x) / s
        return c / (x * s) * _norm_pdf(z) * _norm_cdf(-z) ** (c * 1.0 - 1.0)

    def _cdf(self, x, c, s):
        return 1.0 - _norm_cdf(-np.log(x) / s) ** (c * 1.0)

    def _ppf(self, q, c, s):
        return np.exp(-s * _norm_ppf((1.0 - q) ** (1.0 / c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
class powernorm_gen(rv_continuous):
    r"""A power normal continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `powernorm` is:

    .. math::

        f(x, c) = c \phi(x) (\Phi(-x))^{c-1}

    where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
    and :math:`x >= 0`, :math:`c > 0`.

    `powernorm` takes ``c`` as a shape parameter for :math:`c`.

    %(after_notes)s

    %(example)s

    """

    def _pdf(self, x, c):
        # pdf(x, c) = c * phi(x) * (Phi(-x))**(c - 1)
        return c * _norm_pdf(x) * _norm_cdf(-x) ** (c - 1.0)

    def _logpdf(self, x, c):
        return np.log(c) + _norm_logpdf(x) + (c - 1) * _norm_logcdf(-x)

    def _cdf(self, x, c):
        return 1.0 - _norm_cdf(-x) ** (c * 1.0)

    def _ppf(self, q, c):
        # Invert the cdf via the normal quantile function.
        return -_norm_ppf((1.0 - q) ** (1.0 / c))
powernorm = powernorm_gen(name='powernorm')
class rdist_gen(rv_continuous):
    r"""An R-distributed (symmetric beta) continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `rdist` is:

    .. math::

        f(x, c) = \frac{(1-x^2)^{c/2-1}}{B(1/2, c/2)}

    for :math:`-1 \le x \le 1`, :math:`c > 0`. `rdist` is also called the
    symmetric beta distribution: if B has a `beta` distribution with
    parameters (c/2, c/2), then X = 2*B - 1 follows a R-distribution with
    parameter c.

    `rdist` takes ``c`` as a shape parameter for :math:`c`.

    This distribution includes the following distribution kernels as
    special cases::

        c = 2:  uniform
        c = 3:  `semicircular`
        c = 4:  Epanechnikov (parabolic)
        c = 6:  quartic (biweight)
        c = 8:  triweight

    %(after_notes)s

    %(example)s

    """
    # use relation to the beta distribution for pdf, cdf, etc
    def _pdf(self, x, c):
        # Evaluate via the log-pdf for numerical stability near x = +-1.
        return np.exp(self._logpdf(x, c))

    def _logpdf(self, x, c):
        # X = 2*B - 1 with B ~ beta(c/2, c/2); -log(2) is the Jacobian of
        # the affine change of variables.
        return -np.log(2) + beta._logpdf((x + 1)/2, c/2, c/2)

    def _cdf(self, x, c):
        return beta._cdf((x + 1)/2, c/2, c/2)

    def _ppf(self, q, c):
        return 2*beta._ppf(q, c/2, c/2) - 1

    def _rvs(self, c, size=None, random_state=None):
        # Sample B ~ beta(c/2, c/2) and map onto [-1, 1].
        return 2 * random_state.beta(c/2, c/2, size) - 1

    def _munp(self, n, c):
        # Odd raw moments vanish by symmetry; (1 - (n % 2)) zeroes them.
        numerator = (1 - (n % 2)) * sc.beta((n + 1.0) / 2, c / 2.0)
        return numerator / sc.beta(1. / 2, c / 2.)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
def _rayleigh_fit_check_error(ier, msg):
if ier != 1:
raise RuntimeError('rayleigh.fit: fsolve failed to find the root of '
'the first-order conditions of the log-likelihood '
f'function: {msg} (ier={ier})')
class rayleigh_gen(rv_continuous):
    r"""A Rayleigh continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `rayleigh` is:

    .. math::

        f(x) = x \exp(-x^2/2)

    for :math:`x \ge 0`.

    `rayleigh` is a special case of `chi` with ``df=2``.

    %(after_notes)s

    %(example)s

    """
    # pdf has log(r): keep the support open at the left endpoint.
    _support_mask = rv_continuous._open_support_mask

    def _rvs(self, size=None, random_state=None):
        # Rayleigh is chi with 2 degrees of freedom.
        return chi.rvs(2, size=size, random_state=random_state)

    def _pdf(self, r):
        # rayleigh.pdf(r) = r * exp(-r**2/2); evaluated via the log-pdf.
        return np.exp(self._logpdf(r))

    def _logpdf(self, r):
        return np.log(r) - 0.5 * r * r

    def _cdf(self, r):
        # expm1 preserves precision for small r.
        return -sc.expm1(-0.5 * r**2)

    def _ppf(self, q):
        # log1p preserves precision for small q.
        return np.sqrt(-2 * sc.log1p(-q))

    def _sf(self, r):
        return np.exp(self._logsf(r))

    def _logsf(self, r):
        return -0.5 * r * r

    def _isf(self, q):
        return np.sqrt(-2 * np.log(q))

    def _stats(self):
        val = 4 - np.pi
        return (np.sqrt(np.pi/2),
                val/2,
                2*(np.pi-3)*np.sqrt(np.pi)/val**1.5,
                6*np.pi/val-16/val**2)

    def _entropy(self):
        return _EULER/2.0 + 1 - 0.5*np.log(2)

    @_call_super_mom
    @extend_notes_in_docstring(rv_continuous, notes="""\
        Notes specifically for ``rayleigh.fit``: If the location is fixed with
        the `floc` parameter, this method uses an analytical formula to find
        the scale.  Otherwise, this function uses a numerical root finder on
        the first order conditions of the log-likelihood function to find the
        MLE.  Only the (optional) `loc` parameter is used as the initial guess
        for the root finder; the `scale` parameter and any other parameters
        for the optimizer are ignored.\n\n""")
    def fit(self, data, *args, **kwds):
        data, floc, fscale = _check_fit_input_parameters(self, data,
                                                         args, kwds)

        def scale_mle(loc, data):
            # Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
            # and Peacock (2000), Page 175
            return (np.sum((data - loc) ** 2) / (2 * len(data))) ** .5

        def loc_mle(loc, data):
            # This implicit equation for `loc` is used when
            # both `loc` and `scale` are free.
            xm = data - loc
            s1 = xm.sum()
            s2 = (xm**2).sum()
            s3 = (1/xm).sum()
            return s1 - s2/(2*len(data))*s3

        def loc_mle_scale_fixed(loc, scale, data):
            # This implicit equation for `loc` is used when
            # `scale` is fixed but `loc` is not.
            xm = data - loc
            return xm.sum() - scale**2 * (1/xm).sum()

        if floc is not None:
            # `loc` is fixed, analytically determine `scale`.
            if np.any(data - floc <= 0):
                # all data must lie strictly above the fixed location
                raise FitDataError("rayleigh", lower=1, upper=np.inf)
            else:
                return floc, scale_mle(floc, data)

        # Account for user provided guess of `loc`.
        loc0 = kwds.get('loc')
        if loc0 is None:
            # Use _fitstart to estimate loc; ignore the returned scale.
            loc0 = self._fitstart(data)[0]

        if fscale is not None:
            # `scale` is fixed; solve the one first-order condition for loc.
            x, info, ier, msg = optimize.fsolve(loc_mle_scale_fixed, x0=loc0,
                                                args=(fscale, data,),
                                                xtol=1e-10, full_output=True)
            _rayleigh_fit_check_error(ier, msg)
            return x[0], fscale
        else:
            # Neither `loc` nor `scale` are fixed; solve for loc, then get
            # scale from the analytical formula.
            x, info, ier, msg = optimize.fsolve(loc_mle, x0=loc0, args=(data,),
                                                xtol=1e-10, full_output=True)
            _rayleigh_fit_check_error(ier, msg)
            return x[0], scale_mle(x[0], data)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
class reciprocal_gen(rv_continuous):
    r"""A loguniform or reciprocal continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for this class is:

    .. math::

        f(x, a, b) = \frac{1}{x \log(b/a)}

    for :math:`a \le x \le b`, :math:`b > a > 0`. This class takes
    :math:`a` and :math:`b` as shape parameters.

    %(after_notes)s

    %(example)s

    This doesn't show the equal probability of ``0.01``, ``0.1`` and
    ``1``. This is best when the x-axis is log-scaled:

    >>> import numpy as np
    >>> fig, ax = plt.subplots(1, 1)
    >>> ax.hist(np.log10(r))
    >>> ax.set_ylabel("Frequency")
    >>> ax.set_xlabel("Value of random variable")
    >>> ax.xaxis.set_major_locator(plt.FixedLocator([-2, -1, 0]))
    >>> ticks = ["$10^{{ {} }}$".format(i) for i in [-2, -1, 0]]
    >>> ax.set_xticklabels(ticks)  # doctest: +SKIP
    >>> plt.show()

    This random variable will be log-uniform regardless of the base chosen for
    ``a`` and ``b``. Let's specify with base ``2`` instead:

    >>> rvs = %(name)s(2**-2, 2**0).rvs(size=1000)

    Values of ``1/4``, ``1/2`` and ``1`` are equally likely with this random
    variable.  Here's the histogram:

    >>> fig, ax = plt.subplots(1, 1)
    >>> ax.hist(np.log2(rvs))
    >>> ax.set_ylabel("Frequency")
    >>> ax.set_xlabel("Value of random variable")
    >>> ax.xaxis.set_major_locator(plt.FixedLocator([-2, -1, 0]))
    >>> ticks = ["$2^{{ {} }}$".format(i) for i in [-2, -1, 0]]
    >>> ax.set_xticklabels(ticks)  # doctest: +SKIP
    >>> plt.show()

    """
    def _argcheck(self, a, b):
        # valid only for 0 < a < b
        return (a > 0) & (b > a)

    def _get_support(self, a, b):
        # support equals the shape parameters themselves
        return a, b

    def _pdf(self, x, a, b):
        # pdf(x, a, b) = 1 / (x * log(b/a))
        return 1.0 / (x * np.log(b * 1.0 / a))

    def _logpdf(self, x, a, b):
        return -np.log(x) - np.log(np.log(b * 1.0 / a))

    def _cdf(self, x, a, b):
        return (np.log(x) - np.log(a)) / np.log(b * 1.0 / a)

    def _ppf(self, q, a, b):
        # x = a * (b/a)**q inverts the cdf
        return a * (b * 1.0 / a) ** q

    def _munp(self, n, a, b):
        # n-th raw moment: (b**n - a**n) / (n * log(b/a))
        return 1.0 / np.log(b * 1.0 / a) / n * ((b * 1.0) ** n - (a * 1.0) ** n)

    def _entropy(self, a, b):
        return 0.5 * np.log(a * b) + np.log(np.log(b * 1.0 / a))
# Two public names share the same implementation: `loguniform` is the
# preferred name; `reciprocal` is retained for backwards compatibility.
loguniform = reciprocal_gen(name="loguniform")
reciprocal = reciprocal_gen(name="reciprocal")
class rice_gen(rv_continuous):
    r"""A Rice continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `rice` is:

    .. math::

        f(x, b) = x \exp(- \frac{x^2 + b^2}{2}) I_0(x b)

    for :math:`x >= 0`, :math:`b > 0`. :math:`I_0` is the modified Bessel
    function of order zero (`scipy.special.i0`).

    `rice` takes ``b`` as a shape parameter for :math:`b`.

    %(after_notes)s

    The Rice distribution describes the length, :math:`r`, of a 2-D vector with
    components :math:`(U+u, V+v)`, where :math:`U, V` are constant, :math:`u,
    v` are independent Gaussian random variables with standard deviation
    :math:`s`.  Let :math:`R = \sqrt{U^2 + V^2}`. Then the pdf of :math:`r` is
    ``rice.pdf(x, R/s, scale=s)``.

    %(example)s

    """
    def _argcheck(self, b):
        # b == 0 is allowed (reduces to the Rayleigh distribution)
        return b >= 0

    def _rvs(self, b, size=None, random_state=None):
        # https://en.wikipedia.org/wiki/Rice_distribution
        # Length of a 2-D Gaussian vector with mean (b/sqrt(2), b/sqrt(2)).
        t = b/np.sqrt(2) + random_state.standard_normal(size=(2,) + size)
        return np.sqrt((t*t).sum(axis=0))

    def _cdf(self, x, b):
        # Rice CDF via the noncentral chi-square CDF with 2 dof.
        return sc.chndtr(np.square(x), 2, np.square(b))

    def _ppf(self, q, b):
        return np.sqrt(sc.chndtrix(q, 2, np.square(b)))

    def _pdf(self, x, b):
        # rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
        #
        # We use (x**2 + b**2)/2 = ((x-b)**2)/2 + xb.
        # The factor of np.exp(-xb) is then included in the i0e function
        # in place of the modified Bessel function, i0, improving
        # numerical stability for large values of xb.
        return x * np.exp(-(x-b)*(x-b)/2.0) * sc.i0e(x*b)

    def _munp(self, n, b):
        # Raw moments from the confluent hypergeometric representation.
        nd2 = n/2.0
        n1 = 1 + nd2
        b2 = b*b/2.0
        return (2.0**(nd2) * np.exp(-b2) * sc.gamma(n1) *
                sc.hyp1f1(n1, 1, b2))
rice = rice_gen(a=0.0, name="rice")
class recipinvgauss_gen(rv_continuous):
    r"""A reciprocal inverse Gaussian continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `recipinvgauss` is:

    .. math::

        f(x, \mu) = \frac{1}{\sqrt{2\pi x}}
                    \exp\left(\frac{-(1-\mu x)^2}{2\mu^2x}\right)

    for :math:`x \ge 0`.

    `recipinvgauss` takes ``mu`` as a shape parameter for :math:`\mu`.

    %(after_notes)s

    %(example)s

    """

    def _pdf(self, x, mu):
        # pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
        # evaluated through the log-pdf for stability
        return np.exp(self._logpdf(x, mu))

    def _logpdf(self, x, mu):
        # _lazywhere avoids evaluating log(x)/1/x at x <= 0
        return _lazywhere(x > 0, (x, mu),
                          lambda x, mu: (-(1 - mu*x)**2.0 / (2*x*mu**2.0)
                                         - 0.5*np.log(2*np.pi*x)),
                          fillvalue=-np.inf)

    def _cdf(self, x, mu):
        inv_sqrt_x = 1.0 / np.sqrt(x)
        lower = 1.0 / mu - x
        upper = 1.0 / mu + x
        return (_norm_cdf(-inv_sqrt_x * lower)
                - np.exp(2.0 / mu) * _norm_cdf(-inv_sqrt_x * upper))

    def _sf(self, x, mu):
        inv_sqrt_x = 1.0 / np.sqrt(x)
        lower = 1.0 / mu - x
        upper = 1.0 / mu + x
        return (_norm_cdf(inv_sqrt_x * lower)
                + np.exp(2.0 / mu) * _norm_cdf(-inv_sqrt_x * upper))

    def _rvs(self, mu, size=None, random_state=None):
        # X = 1/W with W ~ Wald(mu, 1)
        return 1.0 / random_state.wald(mu, 1.0, size=size)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
class semicircular_gen(rv_continuous):
    r"""A semicircular continuous random variable.

    %(before_notes)s

    See Also
    --------
    rdist

    Notes
    -----
    The probability density function for `semicircular` is:

    .. math::

        f(x) = \frac{2}{\pi} \sqrt{1-x^2}

    for :math:`-1 \le x \le 1`.

    The distribution is a special case of `rdist` with `c = 3`.

    %(after_notes)s

    References
    ----------
    .. [1] "Wigner semicircle distribution",
           https://en.wikipedia.org/wiki/Wigner_semicircle_distribution

    %(example)s

    """
    def _pdf(self, x):
        return 2.0 / np.pi * np.sqrt(1 - x * x)

    def _logpdf(self, x):
        # log1p keeps precision near the endpoints |x| -> 1
        return np.log(2 / np.pi) + 0.5 * sc.log1p(-x * x)

    def _cdf(self, x):
        return 0.5 + 1.0 / np.pi * (x * np.sqrt(1 - x * x) + np.arcsin(x))

    def _ppf(self, q):
        # quantiles via the equivalent rdist with c = 3
        return rdist._ppf(q, 3)

    def _rvs(self, size=None, random_state=None):
        # generate values uniformly distributed on the area under the pdf
        # (semi-circle) by randomly generating the radius and angle
        radius = np.sqrt(random_state.uniform(size=size))
        angle = np.cos(np.pi * random_state.uniform(size=size))
        return radius * angle

    def _stats(self):
        return 0, 0.25, 0, -1.0

    def _entropy(self):
        return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
class skewcauchy_gen(rv_continuous):
    r"""A skewed Cauchy random variable.

    %(before_notes)s

    See Also
    --------
    cauchy : Cauchy distribution

    Notes
    -----
    The probability density function for `skewcauchy` is:

    .. math::

        f(x) = \frac{1}{\pi \left(\frac{x^2}{\left(a\, \text{sign}(x) + 1
                                                   \right)^2} + 1 \right)}

    for a real number :math:`x` and skewness parameter :math:`-1 < a < 1`.

    When :math:`a=0`, the distribution reduces to the usual Cauchy
    distribution.

    %(after_notes)s

    References
    ----------
    .. [1] "Skewed generalized *t* distribution", Wikipedia
       https://en.wikipedia.org/wiki/Skewed_generalized_t_distribution#Skewed_Cauchy_distribution

    %(example)s

    """

    def _argcheck(self, a):
        # skewness parameter restricted to the open interval (-1, 1)
        return np.abs(a) < 1

    def _pdf(self, x, a):
        denominator = np.pi * (x**2 / (a * np.sign(x) + 1)**2 + 1)
        return 1 / denominator

    def _cdf(self, x, a):
        # piecewise arctan form, one branch per side of the mode at 0
        left = (1 - a) / 2 + (1 - a) / np.pi * np.arctan(x / (1 - a))
        right = (1 - a) / 2 + (1 + a) / np.pi * np.arctan(x / (1 + a))
        return np.where(x <= 0, left, right)

    def _ppf(self, x, a):
        below_mode = x < self._cdf(0, a)
        left = np.tan(np.pi / (1 - a) * (x - (1 - a) / 2)) * (1 - a)
        right = np.tan(np.pi / (1 + a) * (x - (1 - a) / 2)) * (1 + a)
        return np.where(below_mode, left, right)

    def _stats(self, a, moments='mvsk'):
        # all moments of the (skew-)Cauchy distribution are undefined
        return np.nan, np.nan, np.nan, np.nan

    def _fitstart(self, data):
        # Use 0 as the initial guess of the skewness shape parameter.
        # For the location and scale, estimate using the median and
        # quartiles.
        q1, median, q3 = np.percentile(data, [25, 50, 75])
        return 0.0, median, (q3 - q1) / 2
skewcauchy = skewcauchy_gen(name='skewcauchy')
class skew_norm_gen(rv_continuous):
    r"""A skew-normal random variable.

    %(before_notes)s

    Notes
    -----
    The pdf is::

        skewnorm.pdf(x, a) = 2 * norm.pdf(x) * norm.cdf(a*x)

    `skewnorm` takes a real number :math:`a` as a skewness parameter
    When ``a = 0`` the distribution is identical to a normal distribution
    (`norm`). `rvs` implements the method of [1]_.

    %(after_notes)s

    %(example)s

    References
    ----------
    .. [1] A. Azzalini and A. Capitanio (1999). Statistical applications of the
        multivariate skew-normal distribution. J. Roy. Statist. Soc., B 61, 579-602.
        :arxiv:`0911.2093`

    """
    def _argcheck(self, a):
        # any finite skewness is allowed, including zero and negatives
        return np.isfinite(a)

    def _pdf(self, x, a):
        return 2.*_norm_pdf(x)*_norm_cdf(a*x)

    def _cdf_single(self, x, *args):
        # Numerically integrate the pdf; split the integral at 0 for x > 0
        # so quad handles each piece on a smooth region.
        _a, _b = self._get_support(*args)
        if x <= 0:
            cdf = integrate.quad(self._pdf, _a, x, args=args)[0]
        else:
            t1 = integrate.quad(self._pdf, _a, 0, args=args)[0]
            t2 = integrate.quad(self._pdf, 0, x, args=args)[0]
            cdf = t1 + t2
        # Clip quadrature round-off that can push the result above 1.
        if cdf > 1:
            # Presumably numerical noise, e.g. 1.0000000000000002
            cdf = 1.0
        return cdf

    def _sf(self, x, a):
        # Reflection identity: SF(x; a) = CDF(-x; -a).
        return self._cdf(-x, -a)

    def _rvs(self, a, size=None, random_state=None):
        # Azzalini & Capitanio (1999) conditioning representation.
        u0 = random_state.normal(size=size)
        v = random_state.normal(size=size)
        d = a/np.sqrt(1 + a**2)
        u1 = d*u0 + v*np.sqrt(1 - d**2)
        return np.where(u0 >= 0, u1, -u1)

    def _stats(self, a, moments='mvsk'):
        # Closed-form moments; each entry computed only when requested.
        output = [None, None, None, None]
        const = np.sqrt(2/np.pi) * a/np.sqrt(1 + a**2)

        if 'm' in moments:
            output[0] = const
        if 'v' in moments:
            output[1] = 1 - const**2
        if 's' in moments:
            output[2] = ((4 - np.pi)/2) * (const/np.sqrt(1 - const**2))**3
        if 'k' in moments:
            output[3] = (2*(np.pi - 3)) * (const**4/(1 - const**2)**2)

        return output
skewnorm = skew_norm_gen(name='skewnorm')
class trapezoid_gen(rv_continuous):
    r"""A trapezoidal continuous random variable.

    %(before_notes)s

    Notes
    -----
    The trapezoidal distribution can be represented with an up-sloping line
    from ``loc`` to ``(loc + c*scale)``, then constant to ``(loc + d*scale)``
    and then downsloping from ``(loc + d*scale)`` to ``(loc+scale)``.  This
    defines the trapezoid base from ``loc`` to ``(loc+scale)`` and the flat
    top from ``c`` to ``d`` proportional to the position along the base
    with ``0 <= c <= d <= 1``.  When ``c=d``, this is equivalent to `triang`
    with the same values for `loc`, `scale` and `c`.
    The method of [1]_ is used for computing moments.

    `trapezoid` takes :math:`c` and :math:`d` as shape parameters.

    %(after_notes)s

    The standard form is in the range [0, 1] with c the mode.
    The location parameter shifts the start to `loc`.
    The scale parameter changes the width from 1 to `scale`.

    %(example)s

    References
    ----------
    .. [1] Kacker, R.N. and Lawrence, J.F. (2007). Trapezoidal and triangular
       distributions for Type B evaluation of standard uncertainty.
       Metrologia 44, 117-127. :doi:`10.1088/0026-1394/44/2/003`

    """
    def _argcheck(self, c, d):
        # flat top must lie inside [0, 1] with c <= d
        return (c >= 0) & (c <= 1) & (d >= 0) & (d <= 1) & (d >= c)

    def _pdf(self, x, c, d):
        # u is the height of the flat top; the three pieces are the
        # up-slope, plateau, and down-slope.
        u = 2 / (d-c+1)

        return _lazyselect([x < c,
                            (c <= x) & (x <= d),
                            x > d],
                           [lambda x, c, d, u: u * x / c,
                            lambda x, c, d, u: u,
                            lambda x, c, d, u: u * (1-x) / (1-d)],
                           (x, c, d, u))

    def _cdf(self, x, c, d):
        # piecewise integral of the pdf over the same three regions
        return _lazyselect([x < c,
                            (c <= x) & (x <= d),
                            x > d],
                           [lambda x, c, d: x**2 / c / (d-c+1),
                            lambda x, c, d: (c + 2 * (x-c)) / (d-c+1),
                            lambda x, c, d: 1-((1-x) ** 2
                                               / (d-c+1) / (1-d))],
                           (x, c, d))

    def _ppf(self, q, c, d):
        # invert each cdf piece; qc/qd are the cdf values at the knots
        qc, qd = self._cdf(c, c, d), self._cdf(d, c, d)
        condlist = [q < qc, q <= qd, q > qd]
        choicelist = [np.sqrt(q * c * (1 + d - c)),
                      0.5 * q * (1 + d - c) + 0.5 * c,
                      1 - np.sqrt((1 - q) * (d - c + 1) * (1 - d))]
        return np.select(condlist, choicelist)

    def _munp(self, n, c, d):
        # Using the parameterization from Kacker, 2007, with
        # a=bottom left, c=top left, d=top right, b=bottom right, then
        #     E[X^n] = h/(n+1)/(n+2) [(b^{n+2}-d^{n+2})/(b-d)
        #                             - ((c^{n+2} - a^{n+2})/(c-a)]
        # with h = 2/((b-a) - (d-c)). The corresponding parameterization
        # in scipy, has a'=loc, c'=loc+c*scale, d'=loc+d*scale, b'=loc+scale,
        # which for standard form reduces to a'=0, b'=1, c'=c, d'=d.
        # Substituting into E[X^n] gives the bd' term as (1 - d^{n+2})/(1 - d)
        # and the ac' term as c^{n-1} for the standard form. The bd' term has
        # numerical difficulties near d=1, so replace (1 - d^{n+2})/(1-d)
        # with expm1((n+2)*log(d))/(d-1).
        # Testing with n=18 for c=(1e-30,1-eps) shows that this is stable.
        # We still require an explicit test for d=1 to prevent divide by zero,
        # and now a test for d=0 to prevent log(0).
        ab_term = c**(n+1)
        dc_term = _lazyselect(
            [d == 0.0, (0.0 < d) & (d < 1.0), d == 1.0],
            [lambda d: 1.0,
             lambda d: np.expm1((n+2) * np.log(d)) / (d-1.0),
             lambda d: n+2],
            [d])
        val = 2.0 / (1.0+d-c) * (dc_term - ab_term) / ((n+1) * (n+2))
        return val

    def _entropy(self, c, d):
        # Using the parameterization from Wikipedia (van Dorp, 2003)
        # with a=bottom left, c=top left, d=top right, b=bottom right
        # gives a'=loc, b'=loc+c*scale, c'=loc+d*scale, d'=loc+scale,
        # which for loc=0, scale=1 is a'=0, b'=c, c'=d, d'=1.
        # Substituting into the entropy formula from Wikipedia gives
        # the following result.
        return 0.5 * (1.0-d+c) / (1.0+d-c) + np.log(0.5 * (1.0+d-c))
# Frozen instance: trapezoidal distribution on [0, 1].
trapezoid = trapezoid_gen(a=0.0, b=1.0, name="trapezoid")
# Note: alias kept for backwards compatibility. Rename was done
# because trapz is a slur in colloquial English (see gh-12924).
trapz = trapezoid_gen(a=0.0, b=1.0, name="trapz")
# __doc__ is None when Python runs with docstrings stripped (-OO);
# only patch it when present.
if trapz.__doc__:
    trapz.__doc__ = "trapz is an alias for `trapezoid`"
class triang_gen(rv_continuous):
    r"""A triangular continuous random variable.

    %(before_notes)s

    Notes
    -----
    The triangular distribution can be represented with an up-sloping line from
    ``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)``
    to ``(loc + scale)``.

    `triang` takes ``c`` as a shape parameter for :math:`c`.

    %(after_notes)s

    The standard form is in the range [0, 1] with c the mode.
    The location parameter shifts the start to `loc`.
    The scale parameter changes the width from 1 to `scale`.

    %(example)s

    """
    def _rvs(self, c, size=None, random_state=None):
        # delegate directly to the generator's triangular sampler
        return random_state.triangular(0, c, 1, size)

    def _argcheck(self, c):
        # mode may coincide with either endpoint
        return (c >= 0) & (c <= 1)

    def _pdf(self, x, c):
        # 0: edge case where c=0
        # 1: generalised case for x < c, don't use x <= c, as it doesn't cope
        #    with c = 0.
        # 2: generalised case for x >= c, but doesn't cope with c = 1
        # 3: edge case where c=1
        r = _lazyselect([c == 0,
                         x < c,
                         (x >= c) & (c != 1),
                         c == 1],
                        [lambda x, c: 2 - 2 * x,
                         lambda x, c: 2 * x / c,
                         lambda x, c: 2 * (1 - x) / (1 - c),
                         lambda x, c: 2 * x],
                        (x, c))
        return r

    def _cdf(self, x, c):
        # same four-way split as _pdf, integrated piecewise
        r = _lazyselect([c == 0,
                         x < c,
                         (x >= c) & (c != 1),
                         c == 1],
                        [lambda x, c: 2*x - x*x,
                         lambda x, c: x * x / c,
                         lambda x, c: (x*x - 2*x + c) / (c-1),
                         lambda x, c: x * x],
                        (x, c))
        return r

    def _ppf(self, q, c):
        # invert the two quadratic cdf branches around q = c
        return np.where(q < c, np.sqrt(c * q), 1-np.sqrt((1-c) * (1-q)))

    def _stats(self, c):
        return ((c+1.0)/3.0,
                (1.0-c+c*c)/18,
                np.sqrt(2)*(2*c-1)*(c+1)*(c-2) /
                (5*np.power((1.0-c+c*c), 1.5)),
                -3.0/5.0)

    def _entropy(self, c):
        # entropy of the standard triangular is independent of the mode c
        return 0.5-np.log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang")
class truncexpon_gen(rv_continuous):
    r"""A truncated exponential continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `truncexpon` is:

    .. math::

        f(x, b) = \frac{\exp(-x)}{1 - \exp(-b)}

    for :math:`0 <= x <= b`.

    `truncexpon` takes ``b`` as a shape parameter for :math:`b`.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, b):
        # upper truncation point must be positive
        return b > 0

    def _get_support(self, b):
        # support is [0, b]; the lower endpoint comes from the instance
        return self.a, b

    def _pdf(self, x, b):
        # truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b));
        # -expm1(-b) == 1 - exp(-b), accurate for small b
        return np.exp(-x)/(-sc.expm1(-b))

    def _logpdf(self, x, b):
        return -x - np.log(-sc.expm1(-b))

    def _cdf(self, x, b):
        return sc.expm1(-x)/sc.expm1(-b)

    def _ppf(self, q, b):
        return -sc.log1p(q*sc.expm1(-b))

    def _munp(self, n, b):
        # wrong answer with formula, same as in continuous.pdf
        # return sc.gamma(n+1) - sc.gammainc(1+n, b)
        if n == 1:
            return (1-(b+1)*np.exp(-b))/(-sc.expm1(-b))
        elif n == 2:
            return 2*(1-0.5*(b*b+2*b+2)*np.exp(-b))/(-sc.expm1(-b))
        else:
            # return generic for higher moments
            # return rv_continuous._mom1_sc(self, n, b)
            return self._mom1_sc(n, b)

    def _entropy(self, b):
        eB = np.exp(b)
        return np.log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
# Frozen instance: truncated exponential distribution on [0, b].
truncexpon = truncexpon_gen(a=0.0, name='truncexpon')

# Standard-normal arguments with magnitude beyond this are treated as fully
# in the tail by the _truncnorm_* helpers below.
TRUNCNORM_TAIL_X = 30
# NOTE(review): not referenced in this chunk; name suggests an iteration cap
# for a Brent root-finder used elsewhere in the truncnorm code — confirm.
TRUNCNORM_MAX_BRENT_ITERS = 40
def _truncnorm_get_delta_scalar(a, b):
    """Standard-normal probability mass Phi(b) - Phi(a) on [a, b].

    Returns 0 when the interval lies entirely beyond +-TRUNCNORM_TAIL_X;
    clamps tiny negative values caused by cancellation to 0.
    """
    if a > TRUNCNORM_TAIL_X or b < -TRUNCNORM_TAIL_X:
        return 0
    if a > 0:
        # upper tail: survival functions avoid catastrophic cancellation
        delta = _norm_sf(a) - _norm_sf(b)
    else:
        delta = _norm_cdf(b) - _norm_cdf(a)
    return max(delta, 0)
def _truncnorm_get_delta(a, b):
    """Vectorized Phi(b) - Phi(a); falls back to the scalar helper
    when both inputs are scalar (or size-1)."""
    if np.isscalar(a) and np.isscalar(b):
        return _truncnorm_get_delta_scalar(a, b)
    a, b = np.atleast_1d(a), np.atleast_1d(b)
    if a.size == 1 and b.size == 1:
        return _truncnorm_get_delta_scalar(a.item(), b.item())
    delta = np.zeros(np.shape(a))
    # only intervals not entirely in a tail get a nonzero mass
    condinner = (a <= TRUNCNORM_TAIL_X) & (b >= -TRUNCNORM_TAIL_X)
    conda = (a > 0) & condinner
    condb = (a <= 0) & condinner
    if np.any(conda):
        # upper tail: use survival functions for precision
        np.place(delta, conda, _norm_sf(a[conda]) - _norm_sf(b[conda]))
    if np.any(condb):
        np.place(delta, condb, _norm_cdf(b[condb]) - _norm_cdf(a[condb]))
    # clamp tiny negatives from cancellation
    delta[delta < 0] = 0
    return delta
def _truncnorm_get_logdelta_scalar(a, b):
    """log(Phi(b) - Phi(a)) for scalar a, b, stable even when the
    difference underflows in linear space."""
    if (a <= TRUNCNORM_TAIL_X) and (b >= -TRUNCNORM_TAIL_X):
        if a > 0:
            delta = _norm_sf(a) - _norm_sf(b)
        else:
            delta = _norm_cdf(b) - _norm_cdf(a)
        delta = max(delta, 0)
        if delta > 0:
            # mass representable in linear space: take the plain log
            return np.log(delta)

    # delta underflowed (or interval fully in a tail): work in log space,
    # choosing the side (cdf vs sf) with less cancellation
    if b < 0 or (np.abs(a) >= np.abs(b)):
        nla, nlb = _norm_logcdf(a), _norm_logcdf(b)
        logdelta = nlb + np.log1p(-np.exp(nla - nlb))
    else:
        sla, slb = _norm_logsf(a), _norm_logsf(b)
        logdelta = sla + np.log1p(-np.exp(slb - sla))
    return logdelta
def _truncnorm_logpdf_scalar(x, a, b):
    """log-pdf of the standard normal truncated to [a, b] at x
    (scalar or array x; scalar a, b)."""
    with np.errstate(invalid='ignore'):
        if np.isscalar(x):
            # fast scalar exits for points outside the support
            if x < a:
                return -np.inf
            if x > b:
                return -np.inf
        shp = np.shape(x)
        x = np.atleast_1d(x)
        out = np.full_like(x, np.nan, dtype=np.double)
        condlta, condgtb = (x < a), (x > b)
        if np.any(condlta):
            np.place(out, condlta, -np.inf)
        if np.any(condgtb):
            np.place(out, condgtb, -np.inf)
        cond_inner = ~condlta & ~condgtb
        if np.any(cond_inner):
            # log pdf = log phi(x) - log(Phi(b) - Phi(a))
            _logdelta = _truncnorm_get_logdelta_scalar(a, b)
            np.place(out, cond_inner, _norm_logpdf(x[cond_inner]) - _logdelta)
        # unwrap to a scalar when the input was scalar
        return (out[0] if (shp == ()) else out)
def _truncnorm_pdf_scalar(x, a, b):
    """pdf of the standard normal truncated to [a, b] at x
    (scalar or array x; scalar a, b)."""
    with np.errstate(invalid='ignore'):
        if np.isscalar(x):
            # fast scalar exits for points outside the support
            if x < a:
                return 0.0
            if x > b:
                return 0.0
        shp = np.shape(x)
        x = np.atleast_1d(x)
        out = np.full_like(x, np.nan, dtype=np.double)
        condlta, condgtb = (x < a), (x > b)
        if np.any(condlta):
            np.place(out, condlta, 0.0)
        if np.any(condgtb):
            np.place(out, condgtb, 0.0)
        cond_inner = ~condlta & ~condgtb
        if np.any(cond_inner):
            delta = _truncnorm_get_delta_scalar(a, b)
            if delta > 0:
                # normalize the normal pdf by the interval mass
                np.place(out, cond_inner, _norm_pdf(x[cond_inner]) / delta)
            else:
                # interval mass underflowed: compute in log space
                np.place(out, cond_inner,
                         np.exp(_truncnorm_logpdf_scalar(x[cond_inner], a, b)))
        # unwrap to a scalar when the input was scalar
        return (out[0] if (shp == ()) else out)
def _truncnorm_logcdf_scalar(x, a, b):
    """log-cdf of the standard normal truncated to [a, b] at x
    (scalar or array x; scalar a, b)."""
    with np.errstate(invalid='ignore'):
        if np.isscalar(x):
            # fast scalar exits outside the support
            if x <= a:
                return -np.inf
            if x >= b:
                return 0
        shp = np.shape(x)
        x = np.atleast_1d(x)
        out = np.full_like(x, np.nan, dtype=np.double)
        condlea, condgeb = (x <= a), (x >= b)
        if np.any(condlea):
            np.place(out, condlea, -np.inf)
        if np.any(condgeb):
            np.place(out, condgeb, 0.0)
        cond_inner = ~condlea & ~condgeb
        if np.any(cond_inner):
            delta = _truncnorm_get_delta_scalar(a, b)
            if delta > 0:
                # interval mass is representable: log of the direct ratio
                np.place(out, cond_inner,
                         np.log((_norm_cdf(x[cond_inner]) - _norm_cdf(a))
                                / delta))
            else:
                # delta underflowed: compute entirely in log space,
                # choosing the less-cancelling side (cdf vs sf)
                with np.errstate(divide='ignore'):
                    if a < 0:
                        nla, nlb = _norm_logcdf(a), _norm_logcdf(b)
                        tab = np.log1p(-np.exp(nla - nlb))
                        nlx = _norm_logcdf(x[cond_inner])
                        tax = np.log1p(-np.exp(nla - nlx))
                        np.place(out, cond_inner, nlx + tax - (nlb + tab))
                    else:
                        sla = _norm_logsf(a)
                        slb = _norm_logsf(b)
                        np.place(out, cond_inner,
                                 np.log1p(-np.exp(_norm_logsf(x[cond_inner])
                                                  - sla))
                                 - np.log1p(-np.exp(slb - sla)))
        # unwrap to a scalar when the input was scalar
        return (out[0] if (shp == ()) else out)
def _truncnorm_cdf_scalar(x, a, b):
with np.errstate(invalid='ignore'):
if np.isscalar(x):
if x <= a:
return -0
if x >= b:
return 1
shp = np.shape(x)
x = np.atleast_1d(x)
out = np.full_like(x, np.nan, dtype=np.double)
condlea, condgeb = (x <= a), (x >= b)
if np.any(condlea):
np.place(out, condlea, 0)
if np.any(condgeb):
np.place(out, condgeb, 1.0)
cond_inner = ~condlea & ~condgeb
if np.any(cond_inner):
delta = _truncnorm_get_delta_scalar(a, b)
if delta > 0:
np.place(out, cond_inner,
(_norm_cdf(x[cond_inner]) - _norm_cdf(a)) / delta)
else:
with np.errstate(divide='ignore'):
np.place(out, cond_inner,
np.exp(_truncnorm_logcdf_scalar(x[cond_inner],
a, b)))
return (out[0] if (shp == ()) else out)
def _truncnorm_logsf_scalar(x, a, b):
    """log survival function of the standard normal truncated to [a, b]
    at x (scalar or array x; scalar a, b)."""
    with np.errstate(invalid='ignore'):
        if np.isscalar(x):
            # fast scalar exits outside the support
            if x <= a:
                return 0.0
            if x >= b:
                return -np.inf
        shp = np.shape(x)
        x = np.atleast_1d(x)
        out = np.full_like(x, np.nan, dtype=np.double)
        condlea, condgeb = (x <= a), (x >= b)
        if np.any(condlea):
            np.place(out, condlea, 0)
        if np.any(condgeb):
            np.place(out, condgeb, -np.inf)
        cond_inner = ~condlea & ~condgeb
        if np.any(cond_inner):
            delta = _truncnorm_get_delta_scalar(a, b)
            if delta > 0:
                # interval mass representable: log of the direct sf ratio
                np.place(out, cond_inner,
                         np.log((_norm_sf(x[cond_inner]) - _norm_sf(b))
                                / delta))
            else:
                # delta underflowed: compute in log space, using the cdf
                # side when b < 0 and the sf side otherwise (mirrors
                # _truncnorm_logcdf_scalar)
                with np.errstate(divide='ignore'):
                    if b < 0:
                        nla, nlb = _norm_logcdf(a), _norm_logcdf(b)
                        np.place(out, cond_inner,
                                 np.log1p(-np.exp(_norm_logcdf(x[cond_inner])
                                                  - nlb))
                                 - np.log1p(-np.exp(nla - nlb)))
                    else:
                        sla, slb = _norm_logsf(a), _norm_logsf(b)
                        tab = np.log1p(-np.exp(slb - sla))
                        slx = _norm_logsf(x[cond_inner])
                        tax = np.log1p(-np.exp(slb - slx))
                        np.place(out, cond_inner, slx + tax - (sla + tab))
        # unwrap to a scalar when the input was scalar
        return (out[0] if (shp == ()) else out)
def _truncnorm_sf_scalar(x, a, b):
    """Survival function of the standard normal truncated to [a, b]
    at x (scalar or array x; scalar a, b)."""
    with np.errstate(invalid='ignore'):
        if np.isscalar(x):
            # fast scalar exits outside the support
            if x <= a:
                return 1.0
            if x >= b:
                return 0.0
        shp = np.shape(x)
        x = np.atleast_1d(x)
        out = np.full_like(x, np.nan, dtype=np.double)
        condlea, condgeb = (x <= a), (x >= b)
        if np.any(condlea):
            np.place(out, condlea, 1.0)
        if np.any(condgeb):
            np.place(out, condgeb, 0.0)
        cond_inner = ~condlea & ~condgeb
        if np.any(cond_inner):
            delta = _truncnorm_get_delta_scalar(a, b)
            if delta > 0:
                # direct ratio of tail masses
                np.place(out, cond_inner,
                         (_norm_sf(x[cond_inner]) - _norm_sf(b)) / delta)
            else:
                # delta underflowed: exponentiate the log-sf instead
                np.place(out, cond_inner,
                         np.exp(_truncnorm_logsf_scalar(x[cond_inner], a, b)))
        # unwrap to a scalar when the input was scalar
        return (out[0] if (shp == ()) else out)
def _norm_logcdfprime(z):
# derivative of special.log_ndtr (See special/cephes/ndtr.c)
# Differentiate formula for log Phi(z)_truncnorm_ppf
# log Phi(z) = -z^2/2 - log(-z) - log(2pi)/2
# + log(1 + sum (-1)^n (2n-1)!! / z^(2n))
# Convergence of series is slow for |z| < 10, but can use
# d(log Phi(z))/dz = dPhi(z)/dz / Phi(z)
# Just take the first 10 terms because that is sufficient for use
# in _norm_ilogcdf
assert np.all(z <= -10)
lhs = -z - 1/z
denom_cons = 1/z**2
numerator = 1
pwr = 1.0
denom_total, numerator_total = 0, 0
sign = -1
for i in range(1, 11):
pwr *= denom_cons
numerator *= 2 * i - 1
term = sign * numerator * pwr
denom_total += term
numerator_total += term * (2 * i) / z
sign = -sign
return lhs - numerator_total / (1 + denom_total)
def _norm_ilogcdf(y):
    """Inverse function to ``_norm_logcdf`` (== ``sc.log_ndtr``).

    Only intended for very negative values of ``y``: at minimum
    y <= -(log(2*pi) + 2**2)/2 ~= -2.9, with much better convergence for
    y <= -10.  Uses an asymptotic starting guess refined by a few
    approximate Newton-Raphson steps.
    """
    # Initial guess: invert the leading term log Phi(z) ~ -z**2/2 - log(2pi)/2.
    z = -np.sqrt(-2 * (y + np.log(2*np.pi)/2))
    # Four Newton iterations suffice in the intended range.
    for _ in range(4):
        z -= (_norm_logcdf(z) - y) / _norm_logcdfprime(z)
    return z
def _truncnorm_ppf_scalar(q, a, b):
    """Percent-point function (inverse CDF) of the standard normal
    truncated to [a, b], for scalar shape parameters ``a`` and ``b``.

    ``q`` may be a scalar or array-like.  Values q <= 0 map to ``a`` and
    q >= 1 map to ``b``.  The inversion strategy depends on how extreme
    the truncation interval is: direct CDF/SF inversion when the interval
    carries non-negligible mass, log-space inversion for one-sided
    intervals, and a Brent root search on the log-CDF/log-SF otherwise.
    """
    shp = np.shape(q)
    q = np.atleast_1d(q)
    out = np.zeros(np.shape(q))
    condle0, condge1 = (q <= 0), (q >= 1)
    if np.any(condle0):
        out[condle0] = a
    if np.any(condge1):
        out[condge1] = b
    delta = _truncnorm_get_delta_scalar(a, b)
    cond_inner = ~condle0 & ~condge1
    if np.any(cond_inner):
        qinner = q[cond_inner]
        if delta > 0:
            # Interval has non-negligible probability mass: invert the
            # CDF directly (or the SF when a > 0, for better conditioning).
            if a > 0:
                sa, sb = _norm_sf(a), _norm_sf(b)
                np.place(out, cond_inner,
                         _norm_isf(qinner * sb + sa * (1.0 - qinner)))
            else:
                na, nb = _norm_cdf(a), _norm_cdf(b)
                np.place(out, cond_inner,
                         _norm_ppf(qinner * nb + na * (1.0 - qinner)))
        elif np.isinf(b):
            # One-sided interval [a, inf): invert via the log-SF.
            np.place(out, cond_inner,
                     -_norm_ilogcdf(np.log1p(-qinner) + _norm_logsf(a)))
        elif np.isinf(a):
            # One-sided interval (-inf, b]: invert via the log-CDF.
            # BUG FIX: use qinner (= q[cond_inner]), not the full q array.
            # np.place truncates the value array to the number of True
            # entries in cond_inner, so passing q would misalign the
            # quantiles whenever any q <= 0 or q >= 1 entries are present
            # (cf. the symmetric branch above, which already uses qinner).
            np.place(out, cond_inner,
                     _norm_ilogcdf(np.log(qinner) + _norm_logcdf(b)))
        else:
            if b < 0:
                # Solve
                # norm_logcdf(x)
                # = norm_logcdf(a) + log1p(q * (expm1(norm_logcdf(b)
                #                               - norm_logcdf(a)))
                # = nla + log1p(q * expm1(nlb - nla))
                # = nlb + log(q) + log1p((1-q) * exp(nla - nlb)/q)
                def _f_cdf(x, c):
                    return _norm_logcdf(x) - c

                nla, nlb = _norm_logcdf(a), _norm_logcdf(b)
                values = nlb + np.log(q[cond_inner])
                C = np.exp(nla - nlb)
                if C:
                    one_minus_q = (1 - q)[cond_inner]
                    values += np.log1p(one_minus_q * C / q[cond_inner])
                x = [optimize._zeros_py.brentq(_f_cdf, a, b, args=(c,),
                                               maxiter=TRUNCNORM_MAX_BRENT_ITERS)
                     for c in values]
                np.place(out, cond_inner, x)
            else:
                # Solve
                # norm_logsf(x)
                # = norm_logsf(b) + log1p((1-q) * (expm1(norm_logsf(a)
                #                                  - norm_logsf(b)))
                # = slb + log1p((1-q) * expm1(sla - slb))
                # = sla + log(1-q) + log1p(q * np.exp(slb - sla)/(1-q))
                def _f_sf(x, c):
                    return _norm_logsf(x) - c

                sla, slb = _norm_logsf(a), _norm_logsf(b)
                one_minus_q = (1-q)[cond_inner]
                values = sla + np.log(one_minus_q)
                C = np.exp(slb - sla)
                if C:
                    values += np.log1p(q[cond_inner] * C / one_minus_q)
                x = [optimize._zeros_py.brentq(_f_sf, a, b, args=(c,),
                                               maxiter=TRUNCNORM_MAX_BRENT_ITERS)
                     for c in values]
                np.place(out, cond_inner, x)
    # Clip any numerical overshoot back into the support.
    out[out < a] = a
    out[out > b] = b
    return (out[0] if (shp == ()) else out)
class truncnorm_gen(rv_continuous):
    r"""A truncated normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The standard form of this distribution is a standard normal truncated to
    the range [a, b] --- notice that a and b are defined over the domain of the
    standard normal. To convert clip values for a specific mean and standard
    deviation, use::
        a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
    `truncnorm` takes :math:`a` and :math:`b` as shape parameters.
    %(after_notes)s
    %(example)s
    """

    def _argcheck(self, a, b):
        # The truncation interval must be non-degenerate.
        return a < b

    def _get_support(self, a, b):
        return a, b

    # Each public method below follows the same pattern: use the fast scalar
    # helper when both shape parameters are (effectively) scalar; otherwise
    # broadcast x, a, b with nditer and evaluate element-wise.

    def _pdf(self, x, a, b):
        if np.isscalar(a) and np.isscalar(b):
            return _truncnorm_pdf_scalar(x, a, b)
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        if a.size == 1 and b.size == 1:
            return _truncnorm_pdf_scalar(x, a.item(), b.item())
        it = np.nditer([x, a, b, None], [],
                       [['readonly'], ['readonly'], ['readonly'],
                        ['writeonly', 'allocate']])
        for (_x, _a, _b, _ld) in it:
            _ld[...] = _truncnorm_pdf_scalar(_x, _a, _b)
        return it.operands[3]

    def _logpdf(self, x, a, b):
        if np.isscalar(a) and np.isscalar(b):
            return _truncnorm_logpdf_scalar(x, a, b)
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        if a.size == 1 and b.size == 1:
            return _truncnorm_logpdf_scalar(x, a.item(), b.item())
        it = np.nditer([x, a, b, None], [],
                       [['readonly'], ['readonly'], ['readonly'],
                        ['writeonly', 'allocate']])
        for (_x, _a, _b, _ld) in it:
            _ld[...] = _truncnorm_logpdf_scalar(_x, _a, _b)
        return it.operands[3]

    def _cdf(self, x, a, b):
        if np.isscalar(a) and np.isscalar(b):
            return _truncnorm_cdf_scalar(x, a, b)
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        if a.size == 1 and b.size == 1:
            return _truncnorm_cdf_scalar(x, a.item(), b.item())
        out = None
        it = np.nditer([x, a, b, out], [],
                       [['readonly'], ['readonly'], ['readonly'],
                        ['writeonly', 'allocate']])
        for (_x, _a, _b, _p) in it:
            _p[...] = _truncnorm_cdf_scalar(_x, _a, _b)
        return it.operands[3]

    def _logcdf(self, x, a, b):
        if np.isscalar(a) and np.isscalar(b):
            return _truncnorm_logcdf_scalar(x, a, b)
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        if a.size == 1 and b.size == 1:
            return _truncnorm_logcdf_scalar(x, a.item(), b.item())
        it = np.nditer([x, a, b, None], [],
                       [['readonly'], ['readonly'], ['readonly'],
                        ['writeonly', 'allocate']])
        for (_x, _a, _b, _p) in it:
            _p[...] = _truncnorm_logcdf_scalar(_x, _a, _b)
        return it.operands[3]

    def _sf(self, x, a, b):
        if np.isscalar(a) and np.isscalar(b):
            return _truncnorm_sf_scalar(x, a, b)
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        if a.size == 1 and b.size == 1:
            return _truncnorm_sf_scalar(x, a.item(), b.item())
        out = None
        it = np.nditer([x, a, b, out], [],
                       [['readonly'], ['readonly'], ['readonly'],
                        ['writeonly', 'allocate']])
        for (_x, _a, _b, _p) in it:
            _p[...] = _truncnorm_sf_scalar(_x, _a, _b)
        return it.operands[3]

    def _logsf(self, x, a, b):
        if np.isscalar(a) and np.isscalar(b):
            return _truncnorm_logsf_scalar(x, a, b)
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        if a.size == 1 and b.size == 1:
            return _truncnorm_logsf_scalar(x, a.item(), b.item())
        out = None
        it = np.nditer([x, a, b, out], [],
                       [['readonly'], ['readonly'], ['readonly'],
                        ['writeonly', 'allocate']])
        for (_x, _a, _b, _p) in it:
            _p[...] = _truncnorm_logsf_scalar(_x, _a, _b)
        return it.operands[3]

    def _ppf(self, q, a, b):
        if np.isscalar(a) and np.isscalar(b):
            return _truncnorm_ppf_scalar(q, a, b)
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        if a.size == 1 and b.size == 1:
            return _truncnorm_ppf_scalar(q, a.item(), b.item())
        out = None
        it = np.nditer([q, a, b, out], [],
                       [['readonly'], ['readonly'], ['readonly'],
                        ['writeonly', 'allocate']])
        for (_q, _a, _b, _x) in it:
            _x[...] = _truncnorm_ppf_scalar(_q, _a, _b)
        return it.operands[3]

    def _munp(self, n, a, b):
        def n_th_moment(n, a, b):
            """
            Returns n-th moment. Defined only if n >= 0.
            Function cannot broadcast due to the loop over n
            """
            pA, pB = self._pdf([a, b], a, b)
            probs = [pA, -pB]
            # Recurrence for the raw moments of the truncated normal:
            # m_k = (k-1) * m_{k-2} + a**(k-1)*pdf(a) - b**(k-1)*pdf(b).
            moments = [0, 1]
            for k in range(1, n+1):
                # a or b might be infinite, and the corresponding pdf value
                # is 0 in that case, but nan is returned for the
                # multiplication.  However, as b->infinity, pdf(b)*b**k -> 0.
                # So it is safe to use _lazywhere to avoid the nan.
                vals = _lazywhere(probs, [probs, [a, b]],
                                  lambda x, y: x * y**(k-1), fillvalue=0)
                mk = np.sum(vals) + (k-1) * moments[-2]
                moments.append(mk)
            return moments[-1]

        # Guard against negative n and nan shape parameters.
        return _lazywhere((n >= 0) & (a == a) & (b == b), (n, a, b),
                          np.vectorize(n_th_moment, otypes=[np.float64]),
                          np.nan)

    def _stats(self, a, b, moments='mv'):
        # pdf values at the truncation points feed all the moment formulas.
        pA, pB = self.pdf(np.array([a, b]), a, b)

        def _truncnorm_stats_scalar(a, b, pA, pB, moments):
            m1 = pA - pB
            mu = m1
            # use _lazywhere to avoid nan (See detailed comment in _munp)
            probs = [pA, -pB]
            vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y,
                              fillvalue=0)
            m2 = 1 + np.sum(vals)
            vals = _lazywhere(probs, [probs, [a-mu, b-mu]], lambda x, y: x*y,
                              fillvalue=0)
            # mu2 = m2 - mu**2, but not as numerically stable as:
            # mu2 = (a-mu)*pA - (b-mu)*pB + 1
            mu2 = 1 + np.sum(vals)
            vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y**2,
                              fillvalue=0)
            m3 = 2*m1 + np.sum(vals)
            vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y**3,
                              fillvalue=0)
            m4 = 3*m2 + np.sum(vals)
            # Central third/fourth moments and the standardized
            # skewness/kurtosis derived from the raw moments.
            mu3 = m3 + m1 * (-3*m2 + 2*m1**2)
            g1 = mu3 / np.power(mu2, 1.5)
            mu4 = m4 + m1*(-4*m3 + 3*m1*(2*m2 - m1**2))
            g2 = mu4 / mu2**2 - 3
            return mu, mu2, g1, g2

        _truncnorm_stats = np.vectorize(_truncnorm_stats_scalar,
                                        excluded=('moments',))
        return _truncnorm_stats(a, b, pA, pB, moments)

    def _rvs(self, a, b, size=None, random_state=None):
        # if a and b are scalar, use _rvs_scalar, otherwise need to create
        # output by iterating over parameters
        if np.isscalar(a) and np.isscalar(b):
            out = self._rvs_scalar(a, b, size, random_state=random_state)
        elif a.size == 1 and b.size == 1:
            out = self._rvs_scalar(a.item(), b.item(), size,
                                   random_state=random_state)
        else:
            # When this method is called, size will be a (possibly empty)
            # tuple of integers.  It will not be None; if `size=None` is passed
            # to `rvs()`, size will be the empty tuple ().
            a, b = np.broadcast_arrays(a, b)
            # a and b now have the same shape.
            # `shp` is the shape of the blocks of random variates that are
            # generated for each combination of parameters associated with
            # broadcasting a and b.
            # bc is a tuple the same length as size.  The values
            # in bc are bools.  If bc[j] is True, it means that
            # entire axis is filled in for a given combination of the
            # broadcast arguments.
            shp, bc = _check_shape(a.shape, size)
            # `numsamples` is the total number of variates to be generated
            # for each combination of the input arguments.
            numsamples = int(np.prod(shp))
            # `out` is the array to be returned.  It is filled in in the
            # loop below.
            out = np.empty(size)
            it = np.nditer([a, b],
                           flags=['multi_index'],
                           op_flags=[['readonly'], ['readonly']])
            while not it.finished:
                # Convert the iterator's multi_index into an index into the
                # `out` array where the call to _rvs_scalar() will be stored.
                # Where bc is True, we use a full slice; otherwise we use the
                # index value from it.multi_index.  len(it.multi_index) might
                # be less than len(bc), and in that case we want to align these
                # two sequences to the right, so the loop variable j runs from
                # -len(size) to 0.  This doesn't cause an IndexError, as
                # bc[j] will be True in those cases where it.multi_index[j]
                # would cause an IndexError.
                idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
                            for j in range(-len(size), 0))
                out[idx] = self._rvs_scalar(it[0], it[1], numsamples,
                                            random_state).reshape(shp)
                it.iternext()
        if size == ():
            out = out.item()
        return out

    def _rvs_scalar(self, a, b, numsamples=None, random_state=None):
        # Inverse-transform sampling with scalar shape parameters.
        if not numsamples:
            numsamples = 1
        # prepare sampling of rvs
        size1d = tuple(np.atleast_1d(numsamples))
        N = np.prod(size1d)  # number of rvs needed, reshape upon return
        # Calculate some rvs
        U = random_state.uniform(low=0, high=1, size=N)
        x = self._ppf(U, a, b)
        rvs = np.reshape(x, size1d)
        return rvs


truncnorm = truncnorm_gen(name='truncnorm', momtype=1)
class tukeylambda_gen(rv_continuous):
    r"""A Tukey-Lambda continuous random variable.
    %(before_notes)s
    Notes
    -----
    A flexible distribution, able to represent and interpolate between the
    following distributions:
    - Cauchy (:math:`\lambda = -1`)
    - logistic (:math:`\lambda = 0`)
    - approx Normal (:math:`\lambda = 0.14`)
    - uniform from -1 to 1 (:math:`\lambda = 1`)
    `tukeylambda` takes a real number :math:`\lambda` (denoted ``lam``
    in the implementation) as a shape parameter.
    %(after_notes)s
    %(example)s
    """

    def _argcheck(self, lam):
        # Every real lam is a valid shape parameter.
        return np.ones(np.shape(lam), dtype=bool)

    def _pdf(self, x, lam):
        # pdf(x) = 1 / (F**(lam-1) + (1-F)**(lam-1)) with F = cdf(x);
        # zero outside the bounded support |x| < 1/lam when lam > 0.
        Fx = np.asarray(sc.tklmbda(x, lam))
        Px = Fx**(lam-1.0) + (np.asarray(1-Fx))**(lam-1.0)
        Px = 1.0/np.asarray(Px)
        return np.where((lam <= 0) | (abs(x) < 1.0/np.asarray(lam)), Px, 0.0)

    def _cdf(self, x, lam):
        return sc.tklmbda(x, lam)

    def _ppf(self, q, lam):
        # Quantile function via Box-Cox transforms of q and 1-q.
        return sc.boxcox(q, lam) - sc.boxcox1p(-q, lam)

    def _stats(self, lam):
        # Mean and skewness are 0 by symmetry; variance and kurtosis come
        # from dedicated helpers.
        return 0, _tlvar(lam), 0, _tlkurt(lam)

    def _entropy(self, lam):
        # Differential entropy via numerical quadrature over the quantile
        # density on (0, 1).
        def integ(p):
            return np.log(pow(p, lam-1)+pow(1-p, lam-1))
        return integrate.quad(integ, 0, 1)[0]


tukeylambda = tukeylambda_gen(name='tukeylambda')
class FitUniformFixedScaleDataError(FitDataError):
    """Raised when `uniform.fit` with a fixed scale cannot cover the data."""

    def __init__(self, ptp, fscale):
        template = ("Invalid values in `data`. Maximum likelihood estimation with "
                    "the uniform distribution and fixed scale requires that "
                    "data.ptp() <= fscale, but data.ptp() = %r and fscale = %r.")
        # Store the formatted message directly in args, matching the
        # convention of the FitDataError base class.
        self.args = (template % (ptp, fscale),)
class uniform_gen(rv_continuous):
    r"""A uniform continuous random variable.
    In the standard form, the distribution is uniform on ``[0, 1]``. Using
    the parameters ``loc`` and ``scale``, one obtains the uniform distribution
    on ``[loc, loc + scale]``.
    %(before_notes)s
    %(example)s
    """

    def _rvs(self, size=None, random_state=None):
        return random_state.uniform(0.0, 1.0, size)

    def _pdf(self, x):
        # Constant density 1 on [0, 1]; the `x == x` comparison is False
        # only for nan inputs, which therefore yield 0.
        return 1.0*(x == x)

    def _cdf(self, x):
        return x

    def _ppf(self, q):
        return q

    def _stats(self):
        # mean, variance, skewness, excess kurtosis of U(0, 1)
        return 0.5, 1.0/12, 0, -1.2

    def _entropy(self):
        return 0.0

    @_call_super_mom
    def fit(self, data, *args, **kwds):
        """
        Maximum likelihood estimate for the location and scale parameters.

        `uniform.fit` uses only the following parameters.  Because exact
        formulas are used, the parameters related to optimization that are
        available in the `fit` method of other distributions are ignored
        here.  The only positional argument accepted is `data`.

        Parameters
        ----------
        data : array_like
            Data to use in calculating the maximum likelihood estimate.
        floc : float, optional
            Hold the location parameter fixed to the specified value.
        fscale : float, optional
            Hold the scale parameter fixed to the specified value.

        Returns
        -------
        loc, scale : float
            Maximum likelihood estimates for the location and scale.

        Notes
        -----
        An error is raised if `floc` is given and any values in `data` are
        less than `floc`, or if `fscale` is given and `fscale` is less
        than ``data.max() - data.min()``.  An error is also raised if both
        `floc` and `fscale` are given.

        Examples
        --------
        >>> from scipy.stats import uniform

        We'll fit the uniform distribution to `x`:

        >>> x = np.array([2, 2.5, 3.1, 9.5, 13.0])

        For a uniform distribution MLE, the location is the minimum of the
        data, and the scale is the maximum minus the minimum.

        >>> loc, scale = uniform.fit(x)
        >>> loc
        2.0
        >>> scale
        11.0

        If we know the data comes from a uniform distribution where the support
        starts at 0, we can use `floc=0`:

        >>> loc, scale = uniform.fit(x, floc=0)
        >>> loc
        0.0
        >>> scale
        13.0

        Alternatively, if we know the length of the support is 12, we can use
        `fscale=12`:

        >>> loc, scale = uniform.fit(x, fscale=12)
        >>> loc
        1.5
        >>> scale
        12.0

        In that last example, the support interval is [1.5, 13.5].  This
        solution is not unique.  For example, the distribution with ``loc=2``
        and ``scale=12`` has the same likelihood as the one above.  When
        `fscale` is given and it is larger than ``data.max() - data.min()``,
        the parameters returned by the `fit` method center the support over
        the interval ``[data.min(), data.max()]``.
        """
        if len(args) > 0:
            raise TypeError("Too many arguments.")

        floc = kwds.pop('floc', None)
        fscale = kwds.pop('fscale', None)

        _remove_optimizer_parameters(kwds)

        if floc is not None and fscale is not None:
            # This check is for consistency with `rv_continuous.fit`.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")

        data = np.asarray(data)
        if not np.isfinite(data).all():
            raise RuntimeError("The data contains non-finite values.")

        # MLE for the uniform distribution
        # --------------------------------
        # The PDF is
        #
        #     f(x, loc, scale) = {1/scale  for loc <= x <= loc + scale
        #                        {0        otherwise}
        #
        # The likelihood function is
        #     L(x, loc, scale) = (1/scale)**n
        # where n is len(x), assuming loc <= x <= loc + scale for all x.
        # The log-likelihood is
        #     l(x, loc, scale) = -n*log(scale)
        # The log-likelihood is maximized by making scale as small as possible,
        # while keeping loc <= x <= loc + scale.  So if neither loc nor scale
        # are fixed, the log-likelihood is maximized by choosing
        #     loc = x.min()
        #     scale = x.ptp()
        # If loc is fixed, it must be less than or equal to x.min(), and then
        # the scale is
        #     scale = x.max() - loc
        # If scale is fixed, it must not be less than x.ptp().  If scale is
        # greater than x.ptp(), the solution is not unique.  Note that the
        # likelihood does not depend on loc, except for the requirement that
        # loc <= x <= loc + scale.  All choices of loc for which
        #     x.max() - scale <= loc <= x.min()
        # have the same log-likelihood.  In this case, we choose loc such that
        # the support is centered over the interval [data.min(), data.max()]:
        #     loc = x.min() - 0.5*(scale - x.ptp())
        if fscale is None:
            # scale is not fixed.
            if floc is None:
                # loc is not fixed, scale is not fixed.
                loc = data.min()
                scale = data.ptp()
            else:
                # loc is fixed, scale is not fixed.
                loc = floc
                scale = data.max() - loc
                if data.min() < loc:
                    raise FitDataError("uniform", lower=loc, upper=loc + scale)
        else:
            # loc is not fixed, scale is fixed.
            ptp = data.ptp()
            if ptp > fscale:
                raise FitUniformFixedScaleDataError(ptp=ptp, fscale=fscale)
            # If ptp < fscale, the ML estimate is not unique; see the comments
            # above.  We choose the distribution for which the support is
            # centered over the interval [data.min(), data.max()].
            loc = data.min() - 0.5*(fscale - ptp)
            scale = fscale

        # We expect the return values to be floating point, so ensure it
        # by explicitly converting to float.
        return float(loc), float(scale)


uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
class vonmises_gen(rv_continuous):
    r"""A Von Mises continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `vonmises` and `vonmises_line` is:
    .. math::
        f(x, \kappa) = \frac{ \exp(\kappa \cos(x)) }{ 2 \pi I_0(\kappa) }
    for :math:`-\pi \le x \le \pi`, :math:`\kappa > 0`. :math:`I_0` is the
    modified Bessel function of order zero (`scipy.special.i0`).
    `vonmises` is a circular distribution which does not restrict the
    distribution to a fixed interval. Currently, there is no circular
    distribution framework in scipy. The ``cdf`` is implemented such that
    ``cdf(x + 2*np.pi) == cdf(x) + 1``.
    `vonmises_line` is the same distribution, defined on :math:`[-\pi, \pi]`
    on the real line. This is a regular (i.e. non-circular) distribution.
    `vonmises` and `vonmises_line` take ``kappa`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """

    def _rvs(self, kappa, size=None, random_state=None):
        # numpy provides a native von Mises sampler (mode 0).
        return random_state.vonmises(0.0, kappa, size=size)

    def _pdf(self, x, kappa):
        # vonmises.pdf(x, kappa) = exp(kappa * cos(x)) / (2*pi*I[0](kappa))
        #                        = exp(kappa * (cos(x) - 1)) /
        #                          (2*pi*exp(-kappa)*I[0](kappa))
        #                        = exp(kappa * cosm1(x)) / (2*pi*i0e(kappa))
        # The cosm1/i0e form avoids overflow of exp and i0 for large kappa.
        return np.exp(kappa*sc.cosm1(x)) / (2*np.pi*sc.i0e(kappa))

    def _cdf(self, x, kappa):
        # Delegates to the compiled von Mises CDF in scipy.stats._stats.
        return _stats.von_mises_cdf(kappa, x)

    def _stats_skip(self, kappa):
        # Mean and skewness are 0 by symmetry; the other moments are
        # deliberately left as None here.
        return 0, None, 0, None

    def _entropy(self, kappa):
        return (-kappa * sc.i1(kappa) / sc.i0(kappa) +
                np.log(2 * np.pi * sc.i0(kappa)))


vonmises = vonmises_gen(name='vonmises')
vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
class wald_gen(invgauss_gen):
    r"""A Wald continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `wald` is:
    .. math::
        f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp(- \frac{ (x-1)^2 }{ 2x })
    for :math:`x >= 0`.
    `wald` is a special case of `invgauss` with ``mu=1``.
    %(after_notes)s
    %(example)s
    """
    # The support is the open interval (0, inf).
    _support_mask = rv_continuous._open_support_mask

    # Every distribution function below simply delegates to the inverse
    # Gaussian implementation with its shape parameter pinned at mu = 1.

    def _pdf(self, x):
        # wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
        return invgauss._pdf(x, 1.0)

    def _logpdf(self, x):
        return invgauss._logpdf(x, 1.0)

    def _cdf(self, x):
        return invgauss._cdf(x, 1.0)

    def _logcdf(self, x):
        return invgauss._logcdf(x, 1.0)

    def _sf(self, x):
        return invgauss._sf(x, 1.0)

    def _logsf(self, x):
        return invgauss._logsf(x, 1.0)

    def _stats(self):
        # Closed-form moments of the standard Wald:
        # mean 1, variance 1, skewness 3, excess kurtosis 15.
        return 1.0, 1.0, 3.0, 15.0

    def _rvs(self, size=None, random_state=None):
        # numpy samples the Wald distribution natively.
        return random_state.wald(1.0, 1.0, size=size)


wald = wald_gen(a=0.0, name="wald")
class wrapcauchy_gen(rv_continuous):
    r"""A wrapped Cauchy continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `wrapcauchy` is:
    .. math::
        f(x, c) = \frac{1-c^2}{2\pi (1+c^2 - 2c \cos(x))}
    for :math:`0 \le x \le 2\pi`, :math:`0 < c < 1`.
    `wrapcauchy` takes ``c`` as a shape parameter for :math:`c`.
    %(after_notes)s
    %(example)s
    """

    def _argcheck(self, c):
        # The shape parameter must satisfy 0 < c < 1.
        return (c > 0) & (c < 1)

    def _pdf(self, x, c):
        # wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
        return (1.0-c*c)/(2*np.pi*(1+c*c-2*c*np.cos(x)))

    def _cdf(self, x, c):
        # The CDF is piecewise: symmetric halves on [0, pi) and [pi, 2*pi].

        def f1(x, cr):
            # CDF for 0 <= x < pi
            return 1/np.pi * np.arctan(cr*np.tan(x/2))

        def f2(x, cr):
            # CDF for pi <= x <= 2*pi
            return 1 - 1/np.pi * np.arctan(cr*np.tan((2*np.pi - x)/2))

        cr = (1 + c)/(1 - c)
        return _lazywhere(x < np.pi, (x, cr), f=f1, f2=f2)

    def _ppf(self, q, c):
        # Invert each CDF branch; select based on which side of q = 1/2.
        val = (1.0-c)/(1.0+c)
        rcq = 2*np.arctan(val*np.tan(np.pi*q))
        rcmq = 2*np.pi-2*np.arctan(val*np.tan(np.pi*(1-q)))
        return np.where(q < 1.0/2, rcq, rcmq)

    def _entropy(self, c):
        return np.log(2*np.pi*(1-c*c))

    def _fitstart(self, data):
        # Use 0.5 as the initial guess of the shape parameter.
        # For the location and scale, use the minimum and
        # peak-to-peak/(2*pi), respectively.
        return 0.5, np.min(data), np.ptp(data)/(2*np.pi)


wrapcauchy = wrapcauchy_gen(a=0.0, b=2*np.pi, name='wrapcauchy')
class gennorm_gen(rv_continuous):
    r"""A generalized normal continuous random variable.
    %(before_notes)s
    See Also
    --------
    laplace : Laplace distribution
    norm : normal distribution
    Notes
    -----
    The probability density function for `gennorm` is [1]_:
    .. math::
        f(x, \beta) = \frac{\beta}{2 \Gamma(1/\beta)} \exp(-|x|^\beta)
    :math:`\Gamma` is the gamma function (`scipy.special.gamma`).
    `gennorm` takes ``beta`` as a shape parameter for :math:`\beta`.
    For :math:`\beta = 1`, it is identical to a Laplace distribution.
    For :math:`\beta = 2`, it is identical to a normal distribution
    (with ``scale=1/sqrt(2)``).
    References
    ----------
    .. [1] "Generalized normal distribution, Version 1",
           https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
    .. [2] Nardon, Martina, and Paolo Pianca. "Simulation techniques for
           generalized Gaussian densities." Journal of Statistical
           Computation and Simulation 79.11 (2009): 1317-1329
    .. [3] Wicklin, Rick. "Simulate data from a generalized Gaussian
           distribution" in The DO Loop blog, September 21, 2016,
           https://blogs.sas.com/content/iml/2016/09/21/simulate-generalized-gaussian-sas.html
    %(example)s
    """

    def _pdf(self, x, beta):
        # Exponentiate the numerically safer log-pdf.
        return np.exp(self._logpdf(x, beta))

    def _logpdf(self, x, beta):
        # log pdf = log(beta/2) - log(Gamma(1/beta)) - |x|**beta
        return np.log(0.5*beta) - sc.gammaln(1.0/beta) - abs(x)**beta

    def _cdf(self, x, beta):
        c = 0.5 * np.sign(x)
        # evaluating (.5 + c) first prevents numerical cancellation
        return (0.5 + c) - c * sc.gammaincc(1.0/beta, abs(x)**beta)

    def _ppf(self, x, beta):
        c = np.sign(x - 0.5)
        # evaluating (1. + c) first prevents numerical cancellation
        return c * sc.gammainccinv(1.0/beta, (1.0 + c) - 2.0*c*x)**(1.0/beta)

    def _sf(self, x, beta):
        # Symmetric about 0, so SF(x) = CDF(-x).
        return self._cdf(-x, beta)

    def _isf(self, x, beta):
        return -self._ppf(x, beta)

    def _stats(self, beta):
        # Use log-gamma differences to avoid overflow for small beta.
        c1, c3, c5 = sc.gammaln([1.0/beta, 3.0/beta, 5.0/beta])
        return 0., np.exp(c3 - c1), 0., np.exp(c5 + c1 - 2.0*c3) - 3.

    def _entropy(self, beta):
        return 1. / beta - np.log(.5 * beta) + sc.gammaln(1. / beta)

    def _rvs(self, beta, size=None, random_state=None):
        # see [2]_ for the algorithm
        # see [3]_ for reference implementation in SAS
        z = random_state.gamma(1/beta, size=size)
        y = z ** (1/beta)
        # convert y to array to ensure masking support
        y = np.asarray(y)
        # Attach a random sign to recover the symmetric two-sided law.
        mask = random_state.random(size=y.shape) < 0.5
        y[mask] = -y[mask]
        return y


gennorm = gennorm_gen(name='gennorm')
class halfgennorm_gen(rv_continuous):
    r"""The upper half of a generalized normal continuous random variable.
    %(before_notes)s
    See Also
    --------
    gennorm : generalized normal distribution
    expon : exponential distribution
    halfnorm : half normal distribution
    Notes
    -----
    The probability density function for `halfgennorm` is:
    .. math::
        f(x, \beta) = \frac{\beta}{\Gamma(1/\beta)} \exp(-|x|^\beta)
    for :math:`x > 0`. :math:`\Gamma` is the gamma function
    (`scipy.special.gamma`).
    `halfgennorm` takes ``beta`` as a shape parameter for :math:`\beta`.
    For :math:`\beta = 1`, it is identical to an exponential distribution.
    For :math:`\beta = 2`, it is identical to a half normal distribution
    (with ``scale=1/sqrt(2)``).
    References
    ----------
    .. [1] "Generalized normal distribution, Version 1",
           https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
    %(example)s
    """

    def _pdf(self, x, beta):
        #                                 beta
        # halfgennorm.pdf(x, beta) =  -------------  exp(-|x|**beta)
        #                             gamma(1/beta)
        return np.exp(self._logpdf(x, beta))

    def _logpdf(self, x, beta):
        return np.log(beta) - sc.gammaln(1.0/beta) - x**beta

    def _cdf(self, x, beta):
        # Regularized lower incomplete gamma function of x**beta.
        return sc.gammainc(1.0/beta, x**beta)

    def _ppf(self, x, beta):
        return sc.gammaincinv(1.0/beta, x)**(1.0/beta)

    def _sf(self, x, beta):
        # Complement of the CDF via the upper incomplete gamma function.
        return sc.gammaincc(1.0/beta, x**beta)

    def _isf(self, x, beta):
        return sc.gammainccinv(1.0/beta, x)**(1.0/beta)

    def _entropy(self, beta):
        return 1.0/beta - np.log(beta) + sc.gammaln(1.0/beta)


halfgennorm = halfgennorm_gen(a=0, name='halfgennorm')
class crystalball_gen(rv_continuous):
    r"""
    Crystalball distribution
    %(before_notes)s
    Notes
    -----
    The probability density function for `crystalball` is:
    .. math::
        f(x, \beta, m) = \begin{cases}
                            N \exp(-x^2 / 2),  &\text{for } x > -\beta\\
                            N A (B - x)^{-m}  &\text{for } x \le -\beta
                         \end{cases}
    where :math:`A = (m / |\beta|)^m \exp(-\beta^2 / 2)`,
    :math:`B = m/|\beta| - |\beta|` and :math:`N` is a normalisation constant.
    `crystalball` takes :math:`\beta > 0` and :math:`m > 1` as shape
    parameters.  :math:`\beta` defines the point where the pdf changes
    from a power-law to a Gaussian distribution.  :math:`m` is the power
    of the power-law tail.
    References
    ----------
    .. [1] "Crystal Ball Function",
           https://en.wikipedia.org/wiki/Crystal_Ball_function
    %(after_notes)s
    .. versionadded:: 0.19.0
    %(example)s
    """

    # NOTE: the normalisation constant N is recomputed from (beta, m) in
    # each method below via the same expression.

    def _pdf(self, x, beta, m):
        """
        Return PDF of the crystalball function.
                                            --
                                           | exp(-x**2 / 2),   for x > -beta
        crystalball.pdf(x, beta, m) =  N * |
                                           | A * (B - x)**(-m), for x <= -beta
                                            --
        """
        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
                   _norm_pdf_C * _norm_cdf(beta))

        def rhs(x, beta, m):
            # Gaussian core.
            return np.exp(-x**2 / 2)

        def lhs(x, beta, m):
            # Power-law tail.
            return ((m/beta)**m * np.exp(-beta**2 / 2.0) *
                    (m/beta - beta - x)**(-m))

        return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)

    def _logpdf(self, x, beta, m):
        """
        Return the log of the PDF of the crystalball function.
        """
        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
                   _norm_pdf_C * _norm_cdf(beta))

        def rhs(x, beta, m):
            return -x**2/2

        def lhs(x, beta, m):
            return m*np.log(m/beta) - beta**2/2 - m*np.log(m/beta - beta - x)

        return np.log(N) + _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)

    def _cdf(self, x, beta, m):
        """
        Return CDF of the crystalball function
        """
        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
                   _norm_pdf_C * _norm_cdf(beta))

        def rhs(x, beta, m):
            # Full tail mass plus the Gaussian mass from -beta to x.
            return ((m/beta) * np.exp(-beta**2 / 2.0) / (m-1) +
                    _norm_pdf_C * (_norm_cdf(x) - _norm_cdf(-beta)))

        def lhs(x, beta, m):
            # Integrated power-law tail up to x <= -beta.
            return ((m/beta)**m * np.exp(-beta**2 / 2.0) *
                    (m/beta - beta - x)**(-m+1) / (m-1))

        return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)

    def _ppf(self, p, beta, m):
        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
                   _norm_pdf_C * _norm_cdf(beta))
        # pbeta is the probability mass of the power-law tail; the quantile
        # lies in the tail iff p < pbeta.
        pbeta = N * (m/beta) * np.exp(-beta**2/2) / (m - 1)

        def ppf_less(p, beta, m):
            eb2 = np.exp(-beta**2/2)
            C = (m/beta) * eb2 / (m-1)
            N = 1/(C + _norm_pdf_C * _norm_cdf(beta))
            return (m/beta - beta -
                    ((m - 1)*(m/beta)**(-m)/eb2*p/N)**(1/(1-m)))

        def ppf_greater(p, beta, m):
            eb2 = np.exp(-beta**2/2)
            C = (m/beta) * eb2 / (m-1)
            N = 1/(C + _norm_pdf_C * _norm_cdf(beta))
            return _norm_ppf(_norm_cdf(-beta) + (1/_norm_pdf_C)*(p/N - C))

        return _lazywhere(p < pbeta, (p, beta, m), f=ppf_less, f2=ppf_greater)

    def _munp(self, n, beta, m):
        """
        Returns the n-th non-central moment of the crystalball function.
        """
        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
                   _norm_pdf_C * _norm_cdf(beta))

        def n_th_moment(n, beta, m):
            """
            Returns n-th moment. Defined only if n+1 < m
            Function cannot broadcast due to the loop over n
            """
            A = (m/beta)**m * np.exp(-beta**2 / 2.0)
            B = m/beta - beta
            rhs = (2**((n-1)/2.0) * sc.gamma((n+1)/2) *
                   (1.0 + (-1)**n * sc.gammainc((n+1)/2, beta**2 / 2)))
            lhs = np.zeros(rhs.shape)
            for k in range(n + 1):
                lhs += (sc.binom(n, k) * B**(n-k) * (-1)**k / (m - k - 1) *
                        (m/beta)**(-m + k + 1))
            return A * lhs + rhs

        # Moments with n + 1 >= m diverge; report them as +inf.
        return N * _lazywhere(n + 1 < m, (n, beta, m),
                              np.vectorize(n_th_moment, otypes=[np.float64]),
                              np.inf)

    def _argcheck(self, beta, m):
        """
        Shape parameter bounds are m > 1 and beta > 0.
        """
        return (m > 1) & (beta > 0)


crystalball = crystalball_gen(name='crystalball', longname="A Crystalball Function")
def _argus_phi(chi):
"""
Utility function for the argus distribution used in the pdf, sf and
moment calculation.
Note that for all x > 0:
gammainc(1.5, x**2/2) = 2 * (_norm_cdf(x) - x * _norm_pdf(x) - 0.5).
This can be verified directly by noting that the cdf of Gamma(1.5) can
be written as erf(sqrt(x)) - 2*sqrt(x)*exp(-x)/sqrt(Pi).
We use gammainc instead of the usual definition because it is more precise
for small chi.
"""
return sc.gammainc(1.5, chi**2/2) / 2
class argus_gen(rv_continuous):
r"""
Argus distribution
%(before_notes)s
Notes
-----
The probability density function for `argus` is:
.. math::
f(x, \chi) = \frac{\chi^3}{\sqrt{2\pi} \Psi(\chi)} x \sqrt{1-x^2}
\exp(-\chi^2 (1 - x^2)/2)
for :math:`0 < x < 1` and :math:`\chi > 0`, where
.. math::
\Psi(\chi) = \Phi(\chi) - \chi \phi(\chi) - 1/2
with :math:`\Phi` and :math:`\phi` being the CDF and PDF of a standard
normal distribution, respectively.
`argus` takes :math:`\chi` as a shape parameter.
%(after_notes)s
References
----------
.. [1] "ARGUS distribution",
https://en.wikipedia.org/wiki/ARGUS_distribution
.. versionadded:: 0.19.0
%(example)s
"""
def _logpdf(self, x, chi):
    # Log-density of the ARGUS distribution.
    # for x = 0 or 1, logpdf returns -np.inf
    with np.errstate(divide='ignore'):
        y = 1.0 - x*x
        # Log of the normalisation: chi**3 / (sqrt(2*pi) * Psi(chi)).
        A = 3*np.log(chi) - _norm_pdf_logC - np.log(_argus_phi(chi))
        return A + np.log(x) + 0.5*np.log1p(-x*x) - chi**2 * y / 2
def _pdf(self, x, chi):
    # Exponentiate the numerically safer log-pdf.
    return np.exp(self._logpdf(x, chi))
def _cdf(self, x, chi):
    # CDF via the complement of the survival function.
    return 1.0 - self._sf(x, chi)
def _sf(self, x, chi):
    # SF(x) = Psi(chi*sqrt(1 - x**2)) / Psi(chi), with Psi = _argus_phi.
    return _argus_phi(chi * np.sqrt(1 - x**2)) / _argus_phi(chi)
def _rvs(self, chi, size=None, random_state=None):
    chi = np.asarray(chi)
    if chi.size == 1:
        # Scalar shape parameter: generate everything in one call.
        out = self._rvs_scalar(chi, numsamples=size,
                               random_state=random_state)
    else:
        # Array-valued chi: fill `out` block-by-block, one scalar chi at a
        # time (same broadcasting scheme as truncnorm._rvs).
        shp, bc = _check_shape(chi.shape, size)
        numsamples = int(np.prod(shp))
        out = np.empty(size)
        it = np.nditer([chi],
                       flags=['multi_index'],
                       op_flags=[['readonly']])
        while not it.finished:
            # Use a full slice along any axis that broadcasting fills
            # entirely for this chi value; otherwise the iterator's index.
            idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
                        for j in range(-len(size), 0))
            r = self._rvs_scalar(it[0], numsamples=numsamples,
                                 random_state=random_state)
            out[idx] = r.reshape(shp)
            it.iternext()
    if size == ():
        # Return a scalar when no output shape was requested.
        out = out[()]
    return out
def _rvs_scalar(self, chi, numsamples=None, random_state=None):
# if chi <= 1.8:
# use rejection method, see Devroye:
# Non-Uniform Random Variate Generation, 1986, section II.3.2.
# write: PDF f(x) = c * g(x) * h(x), where
# h is [0,1]-valued and g is a density
# we use two ways to write f
#
# Case 1:
# write g(x) = 3*x*sqrt(1-x**2), h(x) = exp(-chi**2 (1-x**2) / 2)
# If X has a distribution with density g its ppf G_inv is given by:
# G_inv(u) = np.sqrt(1 - u**(2/3))
#
# Case 2:
# g(x) = chi**2 * x * exp(-chi**2 * (1-x**2)/2) / (1 - exp(-chi**2 /2))
# h(x) = sqrt(1 - x**2), 0 <= x <= 1
# one can show that
# G_inv(u) = np.sqrt(2*np.log(u*(np.exp(chi**2/2)-1)+1))/chi
# = np.sqrt(1 + 2*np.log(np.exp(-chi**2/2)*(1-u)+u)/chi**2)
# the latter expression is used for precision with small chi
#
# In both cases, the inverse cdf of g can be written analytically, and
# we can apply the rejection method:
#
# REPEAT
# Generate U uniformly distributed on [0, 1]
# Generate X with density g (e.g. via inverse transform sampling:
# X = G_inv(V) with V uniformly distributed on [0, 1])
# UNTIL X <= h(X)
# RETURN X
#
# We use case 1 for chi <= 0.5 as it maintains precision for small chi
# and case 2 for 0.5 < chi <= 1.8 due to its speed for moderate chi.
#
# if chi > 1.8:
# use relation to the Gamma distribution: if X is ARGUS with parameter
# chi), then Y = chi**2 * (1 - X**2) / 2 has density proportional to
# sqrt(u) * exp(-u) on [0, chi**2 / 2], i.e. a Gamma(3/2) distribution
# conditioned on [0, chi**2 / 2]). Therefore, to sample X from the
# ARGUS distribution, we sample Y from the gamma distribution, keeping
# only samples on [0, chi**2 / 2], and apply the inverse
# transformation X = (1 - 2*Y/chi**2)**(1/2). Since we only
# look at chi > 1.8, gamma(1.5).cdf(chi**2/2) is large enough such
# Y falls in the inteval [0, chi**2 / 2] with a high probability:
# stats.gamma(1.5).cdf(1.8**2/2) = 0.644...
#
# The points to switch between the different methods are determined
# by a comparison of the runtime of the different methods. However,
# the runtime is platform-dependent. The implemented values should
# ensure a good overall performance and are supported by an analysis
# of the rejection constants of different methods.
size1d = tuple(np.atleast_1d(numsamples))
N = int(np.prod(size1d))
x = np.zeros(N)
simulated = 0
chi2 = chi * chi
if chi <= 0.5:
d = -chi2 / 2
while simulated < N:
k = N - simulated
u = random_state.uniform(size=k)
v = random_state.uniform(size=k)
z = v**(2/3)
# acceptance condition: u <= h(G_inv(v)). This simplifies to
accept = (np.log(u) <= d * z)
num_accept = np.sum(accept)
if num_accept > 0:
# we still need to transform z=v**(2/3) to X = G_inv(v)
rvs = np.sqrt(1 - z[accept])
x[simulated:(simulated + num_accept)] = rvs
simulated += num_accept
elif chi <= 1.8:
echi = np.exp(-chi2 / 2)
while simulated < N:
k = N - simulated
u = random_state.uniform(size=k)
v = random_state.uniform(size=k)
z = 2 * np.log(echi * (1 - v) + v) / chi2
# as in case one, simplify u <= h(G_inv(v)) and then transform
# z to the target distribution X = G_inv(v)
accept = (u**2 + z <= 0)
num_accept = np.sum(accept)
if num_accept > 0:
rvs = np.sqrt(1 + z[accept])
x[simulated:(simulated + num_accept)] = rvs
simulated += num_accept
else:
# conditional Gamma for chi > 1.8
while simulated < N:
k = N - simulated
g = random_state.standard_gamma(1.5, size=k)
accept = (g <= chi2 / 2)
num_accept = np.sum(accept)
if num_accept > 0:
x[simulated:(simulated + num_accept)] = g[accept]
simulated += num_accept
x = np.sqrt(1 - 2 * x / chi2)
return np.reshape(x, size1d)
def _stats(self, chi):
# need to ensure that dtype is float
# otherwise the mask below does not work for integers
chi = np.asarray(chi, dtype=float)
phi = _argus_phi(chi)
m = np.sqrt(np.pi/8) * chi * sc.ive(1, chi**2/4) / phi
# compute second moment, use Taylor expansion for small chi (<= 0.1)
mu2 = np.empty_like(chi)
mask = chi > 0.1
c = chi[mask]
mu2[mask] = 1 - 3 / c**2 + c * _norm_pdf(c) / phi[mask]
c = chi[~mask]
coef = [-358/65690625, 0, -94/1010625, 0, 2/2625, 0, 6/175, 0, 0.4]
mu2[~mask] = np.polyval(coef, c)
return m, mu2 - m**2, None, None
argus = argus_gen(name='argus', longname="An Argus Function", a=0.0, b=1.0)
class rv_histogram(rv_continuous):
    """
    Generates a distribution given by a histogram.
    This is useful to generate a template distribution from a binned
    datasample.
    As a subclass of the `rv_continuous` class, `rv_histogram` inherits from
    it a collection of generic methods (see `rv_continuous` for the full
    list), and implements them based on the properties of the provided
    binned datasample.
    Parameters
    ----------
    histogram : tuple of array_like
        Tuple containing two array_like objects: the contents of n bins and
        the (n+1) bin boundaries.  In particular, the return value of
        ``np.histogram`` is accepted directly.
    Notes
    -----
    There are no additional shape parameters except for the loc and scale.
    The pdf is defined as a stepwise function from the provided histogram;
    the cdf is its piecewise-linear integral.
    .. versionadded:: 0.19.0
    Examples
    --------
    Create a scipy.stats distribution from a numpy histogram
    >>> import scipy.stats
    >>> import numpy as np
    >>> data = scipy.stats.norm.rvs(size=100000, loc=0, scale=1.5, random_state=123)
    >>> hist = np.histogram(data, bins=100)
    >>> hist_dist = scipy.stats.rv_histogram(hist)
    The resulting object behaves like any other ``rv_continuous``
    distribution; the pdf is zero outside the outermost bin edges and the
    cdf reaches 1.0 at the rightmost edge.
    """
    _support_mask = rv_continuous._support_mask
    def __init__(self, histogram, *args, **kwargs):
        """
        Create a new distribution using the given histogram
        Parameters
        ----------
        histogram : tuple of array_like
            Tuple of bin contents (length n) and bin boundaries
            (length n+1), e.g. the return value of ``np.histogram``.
        """
        self._histogram = histogram
        if len(histogram) != 2:
            raise ValueError("Expected length 2 for parameter histogram")
        counts = np.asarray(histogram[0])
        edges = np.asarray(histogram[1])
        self._hpdf = counts
        self._hbins = edges
        if len(counts) + 1 != len(edges):
            raise ValueError("Number of elements in histogram content "
                             "and histogram boundaries do not match, "
                             "expected n and n+1.")
        widths = np.diff(edges)
        self._hbin_widths = widths
        # Normalise the bin contents so that the step pdf integrates to one.
        density = counts / float(np.sum(counts * widths))
        self._hcdf = np.hstack([0.0, np.cumsum(density * widths)])
        # Pad the density with zero bins on both sides so that lookups
        # outside the support return a pdf of 0.
        self._hpdf = np.hstack([0.0, density, 0.0])
        # Set support: the outermost bin edges.
        kwargs['a'] = self.a = edges[0]
        kwargs['b'] = self.b = edges[-1]
        super().__init__(*args, **kwargs)
    def _pdf(self, x):
        """Piecewise-constant pdf looked up from the padded histogram."""
        idx = np.searchsorted(self._hbins, x, side='right')
        return self._hpdf[idx]
    def _cdf(self, x):
        """Piecewise-linear cdf obtained by interpolating the bin cdf values."""
        return np.interp(x, self._hbins, self._hcdf)
    def _ppf(self, x):
        """Percentile function: inverse of the piecewise-linear cdf."""
        return np.interp(x, self._hcdf, self._hbins)
    def _munp(self, n):
        """Compute the n-th non-central moment (exact for the step pdf)."""
        powers = self._hbins ** (n + 1)
        integrals = (powers[1:] - powers[:-1]) / (n + 1)
        return np.sum(self._hpdf[1:-1] * integrals)
    def _entropy(self):
        """Compute the differential entropy of the stepwise density."""
        pdf_vals = self._hpdf[1:-1]
        # log only where the density is positive; contribute 0 elsewhere
        log_vals = _lazywhere(pdf_vals > 0.0,
                              (pdf_vals,),
                              np.log,
                              0.0)
        return -np.sum(pdf_vals * log_vals * self._hbin_widths)
    def _updated_ctor_param(self):
        """Include the histogram in the kwargs used to re-create the instance."""
        dct = super()._updated_ctor_param()
        dct['histogram'] = self._histogram
        return dct
class studentized_range_gen(rv_continuous):
    r"""A studentized range continuous random variable.
    %(before_notes)s
    See Also
    --------
    t: Student's t distribution
    Notes
    -----
    The probability density function for `studentized_range` is:
    .. math::
        f(x; k, \nu) = \frac{k(k-1)\nu^{\nu/2}}{\Gamma(\nu/2)
                       2^{\nu/2-1}} \int_{0}^{\infty} \int_{-\infty}^{\infty}
                       s^{\nu} e^{-\nu s^2/2} \phi(z) \phi(sx + z)
                       [\Phi(sx + z) - \Phi(z)]^{k-2} \,dz \,ds
    for :math:`x ≥ 0`, :math:`k > 1`, and :math:`\nu > 0`.
    `studentized_range` takes ``k`` for :math:`k` and ``df`` for :math:`\nu`
    as shape parameters.
    When :math:`\nu` exceeds 100,000, an asymptotic approximation (infinite
    degrees of freedom) is used to compute the cumulative distribution
    function [4]_.
    %(after_notes)s
    References
    ----------
    .. [1] "Studentized range distribution",
           https://en.wikipedia.org/wiki/Studentized_range_distribution
    .. [2] Batista, Ben Dêivide, et al. "Externally Studentized Normal Midrange
           Distribution." Ciência e Agrotecnologia, vol. 41, no. 4, 2017, pp.
           378-389., doi:10.1590/1413-70542017414047716.
    .. [3] Harter, H. Leon. "Tables of Range and Studentized Range." The Annals
           of Mathematical Statistics, vol. 31, no. 4, 1960, pp. 1122-1147.
           JSTOR, www.jstor.org/stable/2237810. Accessed 18 Feb. 2021.
    .. [4] Lund, R. E., and J. R. Lund. "Algorithm AS 190: Probabilities and
           Upper Quantiles for the Studentized Range." Journal of the Royal
           Statistical Society. Series C (Applied Statistics), vol. 32, no. 2,
           1983, pp. 204-210. JSTOR, www.jstor.org/stable/2347300. Accessed 18
           Feb. 2021.
    Examples
    --------
    >>> from scipy.stats import studentized_range
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots(1, 1)
    Calculate the first four moments:
    >>> k, df = 3, 10
    >>> mean, var, skew, kurt = studentized_range.stats(k, df, moments='mvsk')
    Display the probability density function (``pdf``):
    >>> x = np.linspace(studentized_range.ppf(0.01, k, df),
    ...                 studentized_range.ppf(0.99, k, df), 100)
    >>> ax.plot(x, studentized_range.pdf(x, k, df),
    ...         'r-', lw=5, alpha=0.6, label='studentized_range pdf')
    Alternatively, the distribution object can be called (as a function)
    to fix the shape, location and scale parameters. This returns a "frozen"
    RV object holding the given parameters fixed.
    Freeze the distribution and display the frozen ``pdf``:
    >>> rv = studentized_range(k, df)
    >>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
    Check accuracy of ``cdf`` and ``ppf``:
    >>> vals = studentized_range.ppf([0.001, 0.5, 0.999], k, df)
    >>> np.allclose([0.001, 0.5, 0.999], studentized_range.cdf(vals, k, df))
    True
    Rather than using (``studentized_range.rvs``) to generate random variates,
    which is very slow for this distribution, we can approximate the inverse
    CDF using an interpolator, and then perform inverse transform sampling
    with this approximate inverse CDF.
    This distribution has an infinite but thin right tail, so we focus our
    attention on the leftmost 99.9 percent.
    >>> a, b = studentized_range.ppf([0, .999], k, df)
    >>> a, b
    0, 7.41058083802274
    >>> from scipy.interpolate import interp1d
    >>> rng = np.random.default_rng()
    >>> xs = np.linspace(a, b, 50)
    >>> cdf = studentized_range.cdf(xs, k, df)
    # Create an interpolant of the inverse CDF
    >>> ppf = interp1d(cdf, xs, fill_value='extrapolate')
    # Perform inverse transform sampling using the interpolant
    >>> r = ppf(rng.uniform(size=1000))
    And compare the histogram:
    >>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
    >>> ax.legend(loc='best', frameon=False)
    >>> plt.show()
    """
    def _argcheck(self, k, df):
        # valid parameters: at least two samples (k > 1), positive dof
        return (k > 1) & (df > 0)
    def _fitstart(self, data):
        # Default is k=1, but that is not a valid value of the parameter.
        return super(studentized_range_gen, self)._fitstart(data, args=(2, 1))
    def _munp(self, K, k, df):
        # Non-central moment of order K, evaluated by numerical integration
        # of a compiled (Cython) integrand via scipy.LowLevelCallable.
        cython_symbol = '_studentized_range_moment'
        _a, _b = self._get_support()
        # all three of these are used to create a numpy array so they must
        # be the same shape.
        def _single_moment(K, k, df):
            # pack the scalar arguments into a C-readable buffer for the
            # compiled integrand
            log_const = _stats._studentized_range_pdf_logconst(k, df)
            arg = [K, k, df, log_const]
            usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
            llc = LowLevelCallable.from_cython(_stats, cython_symbol, usr_data)
            ranges = [(-np.inf, np.inf), (0, np.inf), (_a, _b)]
            opts = dict(epsabs=1e-11, epsrel=1e-12)
            return integrate.nquad(llc, ranges=ranges, opts=opts)[0]
        # vectorize over array-valued K/k/df inputs
        ufunc = np.frompyfunc(_single_moment, 3, 1)
        return np.float64(ufunc(K, k, df))
    def _pdf(self, x, k, df):
        cython_symbol = '_studentized_range_pdf'
        def _single_pdf(q, k, df):
            log_const = _stats._studentized_range_pdf_logconst(k, df)
            arg = [q, k, df, log_const]
            usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
            llc = LowLevelCallable.from_cython(_stats, cython_symbol, usr_data)
            ranges = [(-np.inf, np.inf), (0, np.inf)]
            opts = dict(epsabs=1e-11, epsrel=1e-12)
            return integrate.nquad(llc, ranges=ranges, opts=opts)[0]
        ufunc = np.frompyfunc(_single_pdf, 3, 1)
        return np.float64(ufunc(x, k, df))
    def _cdf(self, x, k, df):
        def _single_cdf(q, k, df):
            # "When the degrees of freedom V are infinite the probability
            # integral takes [on a] simpler form," and a single asymptotic
            # integral is evaluated rather than the standard double integral.
            # (Lund, Lund, page 205)
            if df < 100000:
                cython_symbol = '_studentized_range_cdf'
                log_const = _stats._studentized_range_cdf_logconst(k, df)
                arg = [q, k, df, log_const]
                usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
                ranges = [(-np.inf, np.inf), (0, np.inf)]
            else:
                cython_symbol = '_studentized_range_cdf_asymptotic'
                arg = [q, k]
                usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
                ranges = [(-np.inf, np.inf)]
            llc = LowLevelCallable.from_cython(_stats, cython_symbol, usr_data)
            opts = dict(epsabs=1e-11, epsrel=1e-12)
            return integrate.nquad(llc, ranges=ranges, opts=opts)[0]
        ufunc = np.frompyfunc(_single_cdf, 3, 1)
        return np.float64(ufunc(x, k, df))
# The studentized range is supported on [0, inf).
studentized_range = studentized_range_gen(name='studentized_range', a=0,
                                          b=np.inf)
# Collect names of classes and objects in this module.
# `get_distribution_names` splits them into distribution-instance names
# (exported) and generator-class names.
pairs = list(globals().copy().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)
# rv_histogram is a public class (not a frozen instance), so add it explicitly.
__all__ = _distn_names + _distn_gen_names + ['rv_histogram']
| andyfaff/scipy | scipy/stats/_continuous_distns.py | Python | bsd-3-clause | 281,380 | [
"CRYSTAL",
"DIRAC",
"Gaussian"
] | 875c8a4212e8a331d2c256d97cd01fd42c2fa2765b04341a734d4390724e4987 |
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Dict
from typing import List
from typing import Optional
import gc
import traceback
from functools import partial
from kivy.logger import Logger
from kivy.gesture import GestureDatabase
from kivy.clock import Clock
from kivy.event import EventDispatcher
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.uix.screenmanager import ScreenManager
from kivy.uix.screenmanager import Screen
from kivy.uix.screenmanager import FadeTransition
from kivy.uix.screenmanager import FallOutTransition
from kivy.uix.screenmanager import NoTransition
from kivy.uix.screenmanager import RiseInTransition
from kivy.uix.screenmanager import SlideTransition
from kivy.uix.screenmanager import SwapTransition
from kivy.uix.screenmanager import WipeTransition
from ORCA.Fonts import cFonts
from ORCA.ui.ShowErrorPopUp import ShowErrorPopUp
from ORCA.utils.LogError import LogError
from ORCA.utils.TypeConvert import ToUnicode
from ORCA.Skin import cSkin
from ORCA.vars.Replace import ReplaceVars
from ORCA.vars.Globals import InitSystemVars
from ORCA.vars.Actions import Var_Increase
from ORCA.ScreenPages import cScreenPages
from ORCA.vars.Access import SetVar
from ORCA.widgets.base.Base import cWidgetBase
from ORCA.actions.ReturnCode import eReturnCode
import ORCA.Globals as Globals
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ORCA.ScreenPage import cScreenPage
from ORCA.Gesture import cGesture
from ORCA.Action import cAction
else:
from typing import TypeVar
cScreenPage = TypeVar("cScreenPage")
cGesture = TypeVar("cGesture")
cAction = TypeVar("cAction")
class cTheScreen(EventDispatcher):
    """ The Main screen class: owns the Kivy ScreenManager, all screen pages,
    page transitions, gesture database and the GUI block counter. """
    def __init__(self, *args, **kwargs):
        super(cTheScreen, self).__init__(*args, **kwargs)
        self.bIntransition:bool = False
        self.iBlockCount:int = 0
        self.iBlockAskCount:int = 0
        self.iRotateCount:int = 0
        self.dPopups:Dict[str,Widget] = {}
        self.oCurrentPage:Optional[cScreenPage] = None
        self.oFonts:cFonts = cFonts()
        self.oGdb:Optional[GestureDatabase] = None
        self.dGestures:Dict[str,cGesture] = {}
        self.iLastWidgetPage:int = 0
        self.oPopupPage:Optional[cScreenPage] = None
        self.oPopup:Optional[cScreenPage] = None
        self.oRootSM:ScreenManager = ScreenManager()
        self.oScreenPages:cScreenPages = cScreenPages()
        self.oSkin:cSkin = cSkin()
        self.oSplashBackground:Screen = Screen(name="SPLASH")
        self.oSplashLogger:Optional[Label] = None
        self.oSplashLogger2:Optional[Label] = None
        self.uCurrentEffect:str = u''
        self.uCurrentEffectDirection:str = u''
        self.uCurrentPageName:str = u''
        self.uLastTouchType:str = u''
        self.uFirstPageName:str = u''
        self.uDefaultTransmitterPictureName:str = u''
        self.uDefaultWaitPictureName:str = u''
        self.uDefName:str = u''
        self.uInterFaceToConfig:str = u''
        self.uScriptToConfig:str = u''
        self.uConfigToConfig:str = u''
        self.uSplashText:str = u''
        # The splash screen is the first (and initially only) screen shown.
        self.oRootSM.add_widget(self.oSplashBackground)
        self.InitVars()
    def InitVars(self) -> None:
        """ (re) Initialises all vars (also after a definition change) """
        InitSystemVars()
        Globals.oDefinitions.InitVars()
        SetVar(uVarName = u'REPVERSION', oVarValue = ToUnicode(Globals.iVersion))
        # Name of the Current page
        # List for settings dialog
        self.bIntransition = False
        self.dGestures.clear()
        self.dPopups.clear()
        self.iLastWidgetPage = 0
        Globals.oActions.InitVars()
        self.oCurrentPage = None
        self.oFonts.DeInit()
        self.oGdb = GestureDatabase()
        self.oPopup = None
        self.oScreenPages.DeInit()
        self.uCurrentEffect = u''
        self.uCurrentEffectDirection = u''
        self.uCurrentPageName = u''
        self.uDefaultTransmitterPictureName = u''
        self.uDefaultWaitPictureName = u''
        self.uDefName = u''
        self.uFirstPageName = u''
        self.uInterFaceToConfig = u''
        self.uScriptToConfig = u''
        self.uConfigToConfig = u''
        self.uSplashText = u''
        self.iBlockCount = 0
        self.iBlockAskCount = 0
        if Globals.oTheScreen:
            Globals.oTheScreen.oSkin.dSkinRedirects.clear()
        # reclaim memory of the discarded page/widget objects
        gc.collect()
    # noinspection PyMethodMayBeStatic,PyUnusedLocal
    def DeInit(self,**kwargs) -> None:
        """ Deinitialises the screen """
        Globals.oEvents.DeInit()
    # noinspection PyUnusedLocal
    def ShowPage(self,uPageName:str,*largs) -> eReturnCode:
        """ Shows a specific page (waiting in case a transition is still in progress) """
        if not self.bIntransition:
            self._ShowPage(uPageName=uPageName)
            return eReturnCode.Nothing
        else:
            # a transition is running: retry on the next clock tick
            Logger.debug ('Waiting to finish transition')
            Clock.schedule_once(partial(self.ShowPage,uPageName),0)
            return eReturnCode.Nothing
    def ShowPageGetPageStartActions(self,*,uPageName:str='Page_None') -> List[cAction]:
        """ Returns the pagestartactions for a specific page (creating the page first if needed) """
        aActions:List[cAction] = Globals.oActions.GetPageStartActionList(uActionName=uPageName, bNoCopy=False)
        oPage:cScreenPage = self.oScreenPages.get(uPageName)
        if oPage:
            oPage.Create()
        return aActions
    def ShowPageGetPageStopActions(self, *,uPageName:str = u'') -> List[cAction]:
        """ Returns the pagestopactions for a specific page (default: the current page) """
        if uPageName==u'':
            if self.oCurrentPage:
                uPageName=self.oCurrentPage.uPageName
        return Globals.oActions.GetPageStopActionList(uActionName = uPageName, bNoCopy = False)
    def _ShowPage(self,*,uPageName:str = 'Page_None') -> bool:
        """ Activates a page on the ScreenManager (or attaches it as a popup).
        Falls back to 'Page_None' for unknown page names. Returns True on
        success, False if the page could not be activated. """
        oPage:cScreenPage
        uPageName:str
        try:
            if self.uCurrentPageName==uPageName:
                return True
            oPage = self.oScreenPages.get(uPageName)
            if oPage is None:
                # unknown page: log the available pages and show the fallback
                Logger.error(u'ShowPage: Wrong Pagename given:'+uPageName)
                self.DumpPages()
                uPageName = 'Page_None'
                oPage = self.oScreenPages.get(uPageName)
                self.oCurrentPage = oPage
            else:
                self.oCurrentPage = oPage
            self.oScreenPages.CreatePage(uPageName=u'')
            Var_Increase(uVarName = "PAGESTARTCOUNT_"+oPage.uPageName)
            # remove a still-attached popup before switching pages
            if self.oPopupPage:
                if self.oPopupPage.oScreen in self.oRootSM.current_screen.children:
                    self.oRootSM.current_screen.remove_widget(self.oPopupPage.oScreen)
            Logger.debug("TheScreen: Showing page: %s, called from %s" % (uPageName,self.uCurrentPageName))
            oPage.iESCPressCount = 0
            if not oPage.bIsPopUp:
                self.oRootSM.current = uPageName
            else:
                # popups are stacked onto the currently shown screen
                self.oPopupPage=oPage
                #self.oRoot.add_widget(oPage.oScreen)
                self.oRootSM.current_screen.add_widget(oPage.oScreen)
            oPage.uCalledByPageName = self.uCurrentPageName
            self.oCurrentPage.uCallingPageName = "" # uPageName
            self.uCurrentPageName = uPageName
            if self.uFirstPageName==u'':
                self.uFirstPageName=uPageName
            self.oScreenPages.AppendToPageQueue(oPage=oPage)
            oPage.SetTransmitterPicture(uTransmitterPictureName=self.uDefaultTransmitterPictureName)
            oPage.SetWaitPicture(uWaitPictureName=self.uDefaultWaitPictureName)
            return True
        except Exception as e:
            uMsg:str
            uMsg = traceback.format_exc()
            Logger.debug (uMsg)
            Logger.debug("Rootsm:"+str(self.oRootSM))
            if self.oRootSM is not None:
                Logger.debug("current_screen:"+str(self.oRootSM.current_screen))
            uMsg=LogError(uMsg=u'TheScreen: ShowPage: Page could not be activated:'+uPageName,oException=e)
            ShowErrorPopUp(uMessage=uMsg)
            return False
    def IsPopup(self,*,uPageName:Optional[str]=None) -> bool:
        """ Detects/returns, if a page is a popup page (default: the current page) """
        oPage:cScreenPage
        uPageNameOrg:str=uPageName
        if uPageName is None or uPageName=='':
            oPage=self.oCurrentPage
            if oPage is None:
                return False
        else:
            uPageName=ReplaceVars(uPageName)
            oPage=self.oScreenPages.get(uPageName)
        if oPage is None:
            Logger.debug(u'IsPopup: Wrong Pagename given: [%s/%s]' % (uPageName,uPageNameOrg))
            return False
        else:
            return oPage.bIsPopUp
    # noinspection PyUnusedLocal
    def On_Transition_Complete(self, oTransition) -> None:
        """ Called by the framework, when the transition has been finished, sets the flag, to stop waiting """
        self.bIntransition = False
    # noinspection PyUnusedLocal
    def On_Transition_Started(self, oTransition,Percentage) -> None:
        """ Called by the framework, when the transition has been started """
        #print 'in Transition',oTransition.is_active,Percentage
        if Percentage==0:
            self.bIntransition = True
    def SetPageEffectDirection(self,*,uDirection:str='fade') -> bool:
        """ Sets the Page effect direction (in case , the effect has an direction) """
        self.uCurrentEffectDirection = uDirection
        try:
            # NOTE(review): the `or True` short-circuits the SlideTransition
            # type check, so the direction is applied to every transition
            # type — confirm whether this is intentional or a debug leftover.
            if ToUnicode(type(self.oRootSM.transition)).endswith(u'SlideTransition\'>') or True:
                if uDirection!=u'':
                    self.oRootSM.transition.direction=uDirection
            return True
        except Exception as e:
            uMsg:str=LogError(uMsg=u'TheScreen: Can not set page effect direction:' + uDirection ,oException=e)
            ShowErrorPopUp(uMessage=uMsg)
            return False
    def SetPageEffect(self,*,uEffect:str) -> bool:
        """ Sets the page effect for showing a page.
        If the requested transition type is already active it is kept
        (early return) so the bind calls below are not duplicated. """
        self.uCurrentEffect = uEffect
        try:
            if uEffect==u'':
                return True
            uType=ToUnicode(type(self.oRootSM.transition))
            # NOTE(review): the 'no' branch is a plain `if`, not part of the
            # elif chain; since uEffect can't equal both 'no' and 'fade' the
            # behaviour is unchanged, but 'no' skips the same-type early
            # return the other effects get — confirm if deliberate.
            if uEffect==u'no':
                self.oRootSM.transition = NoTransition()
            if uEffect==u'fade':
                if uType.endswith(u'FadeTransition\'>'):
                    return True
                self.oRootSM.transition = FadeTransition()
            elif uEffect==u'slide':
                if uType.endswith(u'SlideTransition\'>'):
                    return True
                self.oRootSM.transition = SlideTransition()
            elif uEffect==u'wipe':
                if uType.endswith(u'WipeTransition\'>'):
                    return True
                self.oRootSM.transition = WipeTransition()
            elif uEffect==u'swap':
                if uType.endswith(u'SwapTransition\'>'):
                    return True
                self.oRootSM.transition = SwapTransition()
            elif uEffect==u'fallout':
                if uType.endswith(u'FallOutTransition\'>'):
                    return True
                self.oRootSM.transition = FallOutTransition()
            elif uEffect==u'risein':
                if uType.endswith(u'RiseInTransition\'>'):
                    return True
                self.oRootSM.transition = RiseInTransition()
            # track transition progress so ShowPage can defer page switches
            # noinspection PyArgumentList
            self.oRootSM.transition.bind(on_complete=self.On_Transition_Complete)
            # noinspection PyArgumentList
            self.oRootSM.transition.bind(on_progress=self.On_Transition_Started)
            return True
        except Exception as e:
            ShowErrorPopUp(uMessage=LogError(uMsg=u'TheScreen: Can not set page effect:' + uEffect,oException=e))
            return False
    def AddActionShowPageToQueue(self,*,uPageName:str) -> None:
        """ Convenient function to show a page by the scheduler """
        self.AddActionToQueue(aActions= [{'string':'showpage','pagename':uPageName}])
    # noinspection PyMethodMayBeStatic
    def AddActionToQueue(self,*,aActions:List[cAction], bNewQueue:bool=False) -> None:
        """ Adds Actions to the scheduler (optionally in a fresh queue) """
        aTmpActions=Globals.oEvents.CreateSimpleActionList(aActions=aActions)
        if bNewQueue:
            Globals.oEvents.ExecuteActionsNewQueue(aActions=aTmpActions,oParentWidget=None)
        else:
            Globals.oEvents.ExecuteActions(aActions=aTmpActions,oParentWidget=None)
    def UpdateSetupWidgets(self) -> None:
        """ Updates all setup / settings widgets """
        for uPageName in self.oScreenPages:
            self.oScreenPages[uPageName].UpdateSetupWidgets()
    def FindWidgets(self,*,uPageName:str,uWidgetName:str,bIgnoreError:bool=False) -> List[cWidgetBase]:
        """ Find a set widgets with a given name.
        Supports 'widget@page' syntax and '*' wildcards for both the widget
        name and the page name. Returns an (possibly empty) list. """
        uWidgetNameRep:str
        uPageNameRep:str
        aPages:List[str]
        aWidgets:List[cWidgetBase]
        oWidget:cWidgetBase
        oPage:cScreenPage
        aRet:List[cWidgetBase] = []
        if "@" in uWidgetName:
            uWidgetName,uPageName=uWidgetName.split(u"@")
        uWidgetNameRep = ReplaceVars(uWidgetName)
        uPageNameRep = ReplaceVars(uPageName)
        if uPageNameRep=="":
            if self.oCurrentPage is not None:
                uPageNameRep=self.oCurrentPage.uPageName
        aPages=[]
        if uPageNameRep!="*":
            aPages.append(uPageNameRep)
        else:
            for uPageNameRep in self.oScreenPages:
                aPages.append(uPageNameRep)
        for uPageName in aPages:
            oPage=self.oScreenPages.get(uPageName)
            if oPage is None:
                if not bIgnoreError:
                    uMsg:str=u'The Screen: Page [%s] for Widget [%s] not found:' % (uPageName,uWidgetNameRep)
                    Logger.error (uMsg)
            else:
                if uWidgetNameRep != "*":
                    # NOTE(review): assumes dWidgets yields a falsy value for
                    # unknown names (e.g. a defaultdict); a plain dict would
                    # raise KeyError here — confirm against cScreenPage.
                    aWidgets = oPage.dWidgets[uWidgetNameRep]
                    if aWidgets:
                        for oWidget in aWidgets:
                            aRet.append(oWidget)
                    else:
                        if uPageNameRep!="*":
                            if not bIgnoreError:
                                Logger.warning ("Can't find widget [%s] on page [%s]" % (uWidgetNameRep,uPageName,))
                else:
                    for oWidget in oPage.dWidgetsID.values():
                        aRet.append(oWidget)
        if len(aRet)==0:
            if not bIgnoreError:
                uMsg:str=u'The Screen: Widget not found:'+uWidgetNameRep
                Logger.error (uMsg)
                self.DumpWidgets(uPageNameRep)
        return aRet
    def DumpWidgets(self,uPageName:str) -> None:
        """
        Dump all widgets
        :param str uPageName: The page name of the widgets, if empty, all widgets in all pages are dumped
        :return:
        """
        uPageNameRep:str
        aPages:List[str]
        oWidget:cWidgetBase
        oPage:cScreenPage
        uPageNameRep = ReplaceVars(uPageName)
        if uPageNameRep=="":
            if self.oCurrentPage is not None:
                uPageNameRep=self.oCurrentPage.uPageName
        aPages=[]
        if uPageNameRep!="*":
            aPages.append(uPageNameRep)
        else:
            for uPageNameRep in self.oScreenPages:
                aPages.append(uPageNameRep)
        for uPageName in aPages:
            oPage=self.oScreenPages.get(uPageName)
            if oPage is None:
                uMsg:str=u'The Screen: Dump: Page [%s]not found: ' % uPageName
                Logger.error (uMsg)
            else:
                for oWidget in oPage.dWidgetsID.values():
                    Logger.debug("Widget:[%s] Page:[%s]" % (oWidget.uName,oWidget.oParentScreenPage.uPageName))
                Logger.debug("")
                for uWidgetName in oPage.dWidgets:
                    Logger.debug("Widget:[%s]" % uWidgetName)
    def GuiIsBlocked(self) -> bool:
        """ returns, if the Gui is Blocked.
        After five consecutive blocked queries the lock is force-released to
        recover from a missed unlock. """
        if self.iBlockCount>0:
            Logger.debug("GUI action ignored, GUI is locked")
            self.iBlockAskCount += 1
        else:
            self.iBlockAskCount = 0
        if self.iBlockAskCount>5:
            Logger.warning("Overiding locked GUI, (prevent unlocked GUI)")
            self.iBlockCount = 0
            self.iBlockAskCount = 0
        return self.iBlockCount>0
    def BlockGui(self,*,bStatus:bool) -> None:
        """ Blocks or unblocks the Gui (nested calls are counted) """
        if bStatus:
            self.iBlockCount += 1
        else:
            self.iBlockCount -= 1
            if self.iBlockCount<0:
                Logger.warning("Unlocking mismatch, unlocking unlocked GUI")
                self.iBlockCount = 0
    # noinspection PyUnusedLocal
    def on_motion(self, window,etype, motionevent) -> None:
        """ To detect, if we still have a down touch if we missed the touch_up message so we do not want endless repeat """
        self.uLastTouchType = etype
    def DumpPages(self,*, uFilter:str='') -> None:
        """ Dumps the names of all pages to the log file (optionally filtered by substring) """
        Logger.error(u'Available Pages:')
        for uKey in sorted(self.oScreenPages):
            if uFilter=="":
                Logger.error(uKey)
            else:
                if uFilter in uKey:
                    Logger.error(uKey)
| thica/ORCA-Remote | src/ORCA/Screen.py | Python | gpl-3.0 | 19,899 | [
"ORCA"
] | c1717db4971b1053d376aff3625800ea568f4a723ff327647f76573bb412c677 |
from asap3 import *
from ase.lattice.cubic import FaceCenteredCubic
# NOTE: legacy Python 2 script (uses the "print >>file" statement syntax).
# Builds a 10x10x10 FCC copper block with free boundaries, attaches the
# EMT calculator, and writes "configuration.txt": the pbc flag, the atom
# count, then one line per atom with atomic number and x/y/z position.
pbc = False
atoms = FaceCenteredCubic(symbol='Cu', size=(10,10,10), pbc=pbc)
atoms.set_calculator(EMT())
paramfile = open("configuration.txt", "w")
print >>paramfile, int(pbc)
print >>paramfile, len(atoms)
z = atoms.get_atomic_numbers()
r = atoms.get_positions()
for i in range(len(atoms)):
    print >>paramfile, z[i], r[i,0], r[i,1], r[i,2]
paramfile.close()
# print the EMT potential energy as a reference value
print "Energy:", atoms.get_potential_energy()
| auag92/n2dm | Asap-3.8.4/OpenKIMexport/test/makeinput.py | Python | mit | 482 | [
"ASE"
] | 6937acf0937df2ea3c4e051495cd60f601e33e90632bdd677a78fcfa20ac0fa5 |
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
"""Extension to python-markdown to support LaTeX (rather than html) output.
Authored by Rufus Pollock: <http://www.rufuspollock.org/>
Reworked by Julian Wulfheide (ju.wulfheide@gmail.com) and
Indico Project (indico-team@cern.ch)
Usage:
======
1. Command Line. A script entitled markdown2latex.py is automatically
installed. For details of usage see help::
$ markdown2latex.py -h
2. As a python-markdown extension::
>>> import markdown
>>> md = markdown.Markdown(None, extensions=['latex'])
>>> # text is input string ...
>>> latex_out = md.convert(text)
3. Directly as a module (slight inversion of std markdown extension setup)::
>>> import markdown
>>> import mdx_latex
>>> md = markdown.Markdown()
>>> latex_mdx = mdx_latex.LaTeXExtension()
>>> latex_mdx.extendMarkdown(md, markdown.__dict__)
>>> out = md.convert(text)
History
=======
Version: 1.0 (November 15, 2006)
* First working version (compatible with markdown 1.5)
* Includes support for tables
Version: 1.1 (January 17, 2007)
* Support for verbatim and images
Version: 1.2 (June 2008)
* Refactor as an extension.
* Make into a proper python/setuptools package.
* Tested with markdown 1.7 but should work with 1.6 and (possibly) 1.5
(though pre/post processor stuff not as worked out there)
Version 1.3: (July 2008)
* Improvements to image output (width)
Version 1.3.1: (August 2009)
* Tiny bugfix to remove duplicate keyword argument and set zip_safe=False
* Add [width=\textwidth] by default for included images
Version 2.0: (June 2011)
* PEP8 cleanup
* Major rework since this was broken by new Python-Markdown releases
Version 2.1: (August 2013)
* Add handler for non locally referenced images, hyperlinks and horizontal rules
* Update math delimiters
"""
import os
import re
import textwrap
import uuid
from io import BytesIO
from mimetypes import guess_extension
from tempfile import NamedTemporaryFile
from urllib.parse import urljoin, urlparse
from xml.etree import ElementTree as etree
import markdown
import requests
from lxml.html import html5parser
from PIL import Image
from requests.exceptions import ConnectionError, InvalidURL
__version__ = '2.1'
# Quote-educating patterns: convert straight quotes into LaTeX ``quotes''.
start_single_quote_re = re.compile(r"""(^|\s|")'""")
start_double_quote_re = re.compile(r'''(^|\s|'|`)"''')
end_double_quote_re = re.compile(r'"(,|\.|\s|$)')
# Populate PIL's format registry so Image.EXTENSION is filled in.
Image.init()
# Reverse mapping: PIL format name (e.g. 'PNG') -> a file extension for it.
IMAGE_FORMAT_EXTENSIONS = {format: ext for (ext, format) in Image.EXTENSION.items()}
# Whitelist of TeX commands allowed inside math-mode segments. Any command
# not in this set is neutralized by sanitize_mathmode() (its backslash is
# escaped), preventing arbitrary LaTeX command injection through $...$.
safe_mathmode_commands = {
    'above', 'abovewithdelims', 'acute', 'aleph', 'alpha', 'amalg', 'And', 'angle', 'approx', 'arccos', 'arcsin',
    'arctan', 'arg', 'array', 'Arrowvert', 'arrowvert', 'ast', 'asymp', 'atop', 'atopwithdelims', 'backslash',
    'backslash', 'bar', 'Bbb', 'begin', 'beta', 'bf', 'Big', 'big', 'bigcap', 'bigcirc', 'bigcup', 'Bigg', 'bigg',
    'Biggl', 'biggl', 'Biggm', 'biggm', 'Biggr', 'biggr', 'Bigl', 'bigl', 'Bigm', 'bigm', 'bigodot', 'bigoplus',
    'bigotimes', 'Bigr', 'bigr', 'bigsqcup', 'bigtriangledown', 'bigtriangleup', 'biguplus', 'bigvee', 'bigwedge',
    'bmod', 'bot', 'bowtie', 'brace', 'bracevert', 'brack', 'breve', 'buildrel', 'bullet', 'cap', 'cases', 'cdot',
    'cdotp', 'cdots', 'check', 'chi', 'choose', 'circ', 'clubsuit', 'colon', 'cong', 'coprod', 'cos', 'cosh', 'cot',
    'coth', 'cr', 'csc', 'cup', 'dagger', 'dashv', 'ddagger', 'ddot', 'ddots', 'deg', 'Delta', 'delta', 'det',
    'diamond', 'diamondsuit', 'dim', 'displaylines', 'displaystyle', 'div', 'dot', 'doteq', 'dots', 'dotsb', 'dotsc',
    'dotsi', 'dotsm', 'dotso', 'Downarrow', 'downarrow', 'ell', 'emptyset', 'end', 'enspace', 'epsilon', 'eqalign',
    'eqalignno', 'equiv', 'eta', 'exists', 'exp', 'fbox', 'flat', 'forall', 'frac', 'frak', 'frown', 'Gamma', 'gamma',
    'gcd', 'ge', 'geq', 'gets', 'gg', 'grave', 'gt', 'gt', 'hat', 'hbar', 'hbox', 'hdashline', 'heartsuit', 'hline',
    'hom', 'hookleftarrow', 'hookrightarrow', 'hphantom', 'hskip', 'hspace', 'Huge', 'huge', 'iff', 'iiint', 'iint',
    'Im', 'imath', 'in', 'inf', 'infty', 'int', 'intop', 'iota', 'it', 'jmath', 'kappa', 'ker', 'kern', 'Lambda',
    'lambda', 'land', 'langle', 'LARGE', 'Large', 'large', 'LaTeX', 'lbrace', 'lbrack', 'lceil', 'ldotp', 'ldots', 'le',
    'left', 'Leftarrow', 'leftarrow', 'leftharpoondown', 'leftharpoonup', 'Leftrightarrow', 'leftrightarrow',
    'leftroot', 'leq', 'leqalignno', 'lfloor', 'lg', 'lgroup', 'lim', 'liminf', 'limits', 'limsup', 'll', 'llap',
    'lmoustache', 'ln', 'lnot', 'log', 'Longleftarrow', 'longleftarrow', 'Longleftrightarrow', 'longleftrightarrow',
    'longmapsto', 'Longrightarrow', 'longrightarrow', 'lor', 'lower', 'lt', 'lt', 'mapsto', 'mathbb', 'mathbf',
    'mathbin', 'mathcal', 'mathclose', 'mathfrak', 'mathinner', 'mathit', 'mathop', 'mathopen', 'mathord', 'mathpunct',
    'mathrel', 'mathrm', 'mathscr', 'mathsf', 'mathstrut', 'mathtt', 'matrix', 'max', 'mbox', 'mid', 'middle', 'min',
    'mit', 'mkern', 'mod', 'models', 'moveleft', 'moveright', 'mp', 'mskip', 'mspace', 'mu', 'nabla', 'natural', 'ne',
    'nearrow', 'neg', 'negthinspace', 'neq', 'newline', 'ni', 'nolimits', 'normalsize', 'not', 'notin', 'nu', 'nwarrow',
    'odot', 'oint', 'oldstyle', 'Omega', 'omega', 'omicron', 'ominus', 'oplus', 'oslash', 'otimes', 'over', 'overbrace',
    'overleftarrow', 'overleftrightarrow', 'overline', 'overrightarrow', 'overset', 'overwithdelims', 'owns',
    'parallel', 'partial', 'perp', 'phantom', 'Phi', 'phi', 'Pi', 'pi', 'pm', 'pmatrix', 'pmb', 'pmod', 'pod', 'Pr',
    'prec', 'preceq', 'prime', 'prod', 'propto', 'Psi', 'psi', 'qquad', 'quad', 'raise', 'rangle', 'rbrace', 'rbrack',
    'rceil', 'Re', 'rfloor', 'rgroup', 'rho', 'right', 'Rightarrow', 'rightarrow', 'rightharpoondown', 'rightharpoonup',
    'rightleftharpoons', 'rlap', 'rm', 'rmoustache', 'root', 'S', 'scr', 'scriptscriptstyle', 'scriptsize',
    'scriptstyle', 'searrow', 'sec', 'setminus', 'sf', 'sharp', 'Sigma', 'sigma', 'sim', 'simeq', 'sin', 'sinh', 'skew',
    'small', 'smallint', 'smash', 'smile', 'Space', 'space', 'spadesuit', 'sqcap', 'sqcup', 'sqrt', 'sqsubseteq',
    'sqsupseteq', 'stackrel', 'star', 'strut', 'subset', 'subseteq', 'succ', 'succeq', 'sum', 'sup', 'supset',
    'supseteq', 'surd', 'swarrow', 'tan', 'tanh', 'tau', 'TeX', 'text', 'textbf', 'textit', 'textrm', 'textsf',
    'textstyle', 'texttt', 'Theta', 'theta', 'thinspace', 'tilde', 'times', 'tiny', 'to', 'top', 'triangle',
    'triangleleft', 'triangleright', 'tt', 'underbrace', 'underleftarrow', 'underleftrightarrow', 'underline',
    'underrightarrow', 'underset', 'Uparrow', 'uparrow', 'Updownarrow', 'updownarrow', 'uplus', 'uproot', 'Upsilon',
    'upsilon', 'varepsilon', 'varphi', 'varpi', 'varrho', 'varsigma', 'vartheta', 'vcenter', 'vdash', 'vdots', 'vec',
    'vee', 'Vert', 'vert', 'vphantom', 'wedge', 'widehat', 'widetilde', 'wp', 'wr', 'Xi', 'xi', 'zeta', '\\'
}
class ImageURLException(Exception):
    """Raised when an image URL cannot be fetched, parsed or decoded."""
    pass
def unescape_html_entities(text):
    """Convert the four basic HTML entities back to literal characters.

    Only ``&amp;``, ``&lt;``, ``&gt;`` and ``&quot;`` are handled — the set
    that python-markdown escapes in its output.

    Note: as extracted, this function performed identity replacements
    (``replace('&', '&')`` …) because the entity names were themselves
    HTML-unescaped; the entity spellings are restored here.
    """
    out = text.replace('&amp;', '&')
    out = out.replace('&lt;', '<')
    out = out.replace('&gt;', '>')
    out = out.replace('&quot;', '"')
    return out
def latex_escape(text, ignore_math=True, ignore_braces=False):
    """Escape LaTeX special characters in *text*.

    :param text: string to escape (``None`` yields ``''``)
    :param ignore_math: when true, ``$...$`` / ``$$...$$`` segments are kept
        as math; unsafe commands inside them are neutralized via
        ``sanitize_mathmode`` and the segment is prefixed with ``\\protect``
    :param ignore_braces: when true, ``{`` and ``}`` are left untouched
    :returns: the escaped string
    """
    if text is None:
        return ''
    chars = {
        '#': r'\#',
        '$': r'\$',
        '%': r'\%',
        '&': r'\&',
        '~': r'\~{}',
        '_': r'\_',
        '^': r'\^{}',
        '\\': r'\textbackslash{}',
        '\x0c': '',
        '\x0b': ''
    }
    if not ignore_braces:
        chars.update({
            '{': r'\{',
            '}': r'\}'})
    math_segments = []

    def substitute(x):
        return chars[x.group()]

    # Random placeholder so user text cannot collide with it.
    math_placeholder = f'[*LaTeXmath-{str(uuid.uuid4())}*]'

    def math_replace(m):
        math_segments.append(m.group(0))
        return math_placeholder

    if ignore_math:
        # Extract math-mode segments and replace with placeholder.
        # Fixed pattern: the display-math alternative must come first and use
        # a proper character class — the previous '\$[^\$]+\$|\$\$(^\$)\$\$'
        # had a dead second branch ('(^\$)' cannot match mid-string) and the
        # inline branch matched the inside of '$$x$$', leaving stray dollars.
        text = re.sub(r'\$\$[^\$]+\$\$|\$[^\$]+\$', math_replace, text)
    pattern = re.compile('|'.join(re.escape(k) for k in chars.keys()))
    res = pattern.sub(substitute, text)
    if ignore_math:
        # Sanitize math-mode segments and put them back in place
        math_segments = list(map(sanitize_mathmode, math_segments))
        res = re.sub(re.escape(math_placeholder), lambda _: '\\protect ' + math_segments.pop(0), res)
    return res
def sanitize_mathmode(text):
    """Neutralize TeX commands that are not on the math-mode whitelist.

    Commands found in ``safe_mathmode_commands`` pass through unchanged;
    any other ``\\command`` gets its backslash doubled so it renders as
    literal text instead of executing.
    """
    def _check(match):
        name = match.group(1)
        if name in safe_mathmode_commands:
            return match.group(0)
        return r'\\' + name

    return re.sub(r'\\([a-zA-Z]+|\\)', _check, text)
def escape_latex_entities(text):
    """Escape latex reserved characters."""
    # Undo markdown's HTML escaping, educate quotes, then LaTeX-escape.
    result = unescape_html_entities(text)
    result = start_single_quote_re.sub(r'\g<1>`', result)
    result = start_double_quote_re.sub(r'\g<1>``', result)
    result = end_double_quote_re.sub(r"''\g<1>", result)
    return latex_escape(result)
def unescape_latex_entities(text):
    """Limit ourselves as this is only used for maths stuff."""
    # Only the ampersand needs restoring inside math segments.
    return text.replace('\\&', '&')
def latex_render_error(message):
    """Generate nice error box in LaTeX document.

    :param message: The error message
    :returns: LaTeX code for error box
    """
    # Substitute first, dedent afterwards (same order as before).
    rendered = r'''
\begin{tcolorbox}[width=\textwidth,colback=red!5!white,colframe=red!75!black,title={Indico rendering error}]
\begin{verbatim}%s\end{verbatim}
\end{tcolorbox}''' % latex_escape(message)
    return textwrap.dedent(rendered)
def latex_render_image(src, alt, tmpdir, strict=False):
    """Generate LaTeX code that includes an arbitrary image from a URL.

    This involves fetching the image from a web server and figuring out its
    MIME type. A temporary file will be created, which is not immediately
    deleted since it has to be included in the LaTeX code. It should be handled
    by the enclosing code.

    :param src: source URL of the image
    :param alt: text to use as ``alt="..."``
    :param tmpdir: the directory where to put any temporary files
    :param strict: whether a faulty URL should break the whole process
    :returns: a ``(latex_code, file_path)`` tuple, containing the LaTeX code
              and path to the temporary image file (``None`` on error).
    """
    from indico.core.config import config
    try:
        info = urlparse(src)
        if not info.scheme and not info.netloc and info.path.startswith('/'):
            # make relative links absolute
            src = urljoin(config.BASE_URL, src)
        if urlparse(src).scheme not in ('http', 'https'):
            raise ImageURLException(f'URL scheme not supported: {src}')
        else:
            try:
                # NOTE(review): TLS verification is disabled (verify=False);
                # confirm this is intentional for internal/self-signed hosts.
                resp = requests.get(src, verify=False, timeout=5)
            except InvalidURL:
                raise ImageURLException(f"Cannot understand URL '{src}'")
            except (requests.Timeout, ConnectionError):
                raise ImageURLException(f'Problem downloading image ({src})')
            except requests.TooManyRedirects:
                raise ImageURLException(f'Too many redirects downloading image ({src})')
            extension = None
            if resp.status_code != 200:
                raise ImageURLException(f'[{resp.status_code}] Error fetching image')
            if resp.headers.get('content-type'):
                extension = guess_extension(resp.headers['content-type'])
                # as incredible as it might seem, '.jpe' will be the answer in some Python environments
                if extension == '.jpe':
                    extension = '.jpg'
            if not extension:
                try:
                    # Try to use PIL to get file type
                    image = Image.open(BytesIO(resp.content))
                    # Worst case scenario, assume it's PNG
                    extension = IMAGE_FORMAT_EXTENSIONS.get(image.format, '.png')
                except OSError:
                    raise ImageURLException('Cannot read image data. Maybe not an image file?')
            # delete=False: the file must survive so LaTeX can read it later;
            # the caller is responsible for cleaning it up.
            with NamedTemporaryFile(prefix='indico-latex-', suffix=extension, dir=tmpdir, delete=False) as tempfile:
                tempfile.write(resp.content)
    except ImageURLException as exc:
        if strict:
            raise
        else:
            return latex_render_error(f'Could not include image: {exc}'), None
    # Using graphicx and adjustbox package for *max width*
    return (textwrap.dedent(r'''
\begin{figure}[H]
\centering
\includegraphics[max width=\linewidth]{%s}
\caption{%s}
\end{figure}
''' % (os.path.basename(tempfile.name), latex_escape(alt))), tempfile.name)
def makeExtension(configs=None):
    """Entry point used by python-markdown to instantiate the extension."""
    extension = LaTeXExtension(configs=configs)
    return extension
class LaTeXExtension(markdown.Extension):
    def __init__(self, configs=None):
        # configs: option dict (e.g. 'apply_br', 'tmpdir') consumed by the
        # tree processor.
        self.configs = configs
        self.reset()

    def extendMarkdown(self, md, md_globals):
        """Register the LaTeX tree/post processors on *md*."""
        self.md = md
        # remove escape pattern -- \\(.*) -- as this messes up any embedded
        # math and we don't need to escape stuff any more for html
        self.md.inlinePatterns.deregister('escape')
        latex_tp = LaTeXTreeProcessor(self.configs)
        math_pp = MathTextPostProcessor()
        link_pp = LinkTextPostProcessor()
        unescape_html_pp = UnescapeHtmlTextPostProcessor()
        # NOTE(review): relies on the private, sorted Registry._priority list;
        # '[-1].priority - 1' registers each processor below the current
        # lowest priority so it runs last. Confirm against the installed
        # python-markdown version.
        md.treeprocessors.register(latex_tp, 'latex', md.treeprocessors._priority[-1].priority - 1)
        md.postprocessors.register(unescape_html_pp, 'unescape_html', md.postprocessors._priority[-1].priority - 1)
        md.postprocessors.register(math_pp, 'math', md.postprocessors._priority[-1].priority - 1)
        md.postprocessors.register(link_pp, 'link', md.postprocessors._priority[-1].priority - 1)
        # Needed for LaTeX postprocessors not to choke on URL-encoded urls
        md.inlinePatterns.register(NonEncodedAutoMailPattern(markdown.inlinepatterns.AUTOMAIL_RE, md), 'automail', 110)

    def reset(self):
        pass
class NonEncodedAutoMailPattern(markdown.inlinepatterns.Pattern):
    """Reimplementation of AutoMailPattern to avoid URL-encoded links."""

    def handleMatch(self, m):
        """Build an ``<a href="mailto:...">`` element for a matched address.

        :param m: regex match whose group 2 holds the (possibly prefixed)
            email address
        :returns: an ``etree`` anchor element
        """
        el = etree.Element('a')
        email = self.unescape(m.group(2))
        # Bug fix: str.removeprefix returns a new string — the original call
        # discarded the result, so 'mailto:' was never stripped.
        email = email.removeprefix('mailto:')
        el.text = markdown.util.AtomicString(email)
        el.set('href', f'mailto:{email}')
        return el
class LaTeXTreeProcessor(markdown.treeprocessors.Treeprocessor):
    """Convert the parsed element tree into a single LaTeX text node."""

    def __init__(self, configs):
        # NOTE(review): the base-class __init__ is not called — confirm the
        # installed Treeprocessor does not require the md instance.
        self.configs = configs

    def run(self, doc):
        """
        Walk the dom converting relevant nodes to text nodes with relevant
        content.
        """
        latex_text = self.tolatex(doc)
        doc.clear()
        doc.text = latex_text

    def tolatex(self, ournode):
        """Recursively render *ournode* (an ElementTree element) to LaTeX."""
        buffer = ''
        subcontent = ''
        if ournode.text:
            subcontent += escape_latex_entities(ournode.text)
        for child in ournode:
            subcontent += self.tolatex(child)
        if ournode.tag == 'h1':
            buffer += '\n\n\\section{%s}\n' % subcontent
        elif ournode.tag == 'h2':
            buffer += '\n\n\\subsection{%s}\n' % subcontent
        elif ournode.tag == 'h3':
            buffer += '\n\\subsubsection{%s}\n' % subcontent
        elif ournode.tag == 'h4':
            buffer += '\n\\paragraph{%s}\n' % subcontent
        elif ournode.tag == 'hr':
            buffer += r'\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}'
        elif ournode.tag == 'ul':
            # no need for leading \n as one will be provided by li
            buffer += '''
\\begin{itemize}%s
\\end{itemize}
''' % subcontent
        elif ournode.tag == 'ol':
            # no need for leading \n as one will be provided by li
            buffer += '''
\\begin{enumerate}%s
\\end{enumerate}
''' % subcontent
        elif ournode.tag == 'li':
            buffer += '''
\\item %s''' % subcontent.strip()
        elif ournode.tag == 'blockquote':
            # use quotation rather than quote as quotation can support multiple
            # paragraphs
            buffer += '''
\\begin{quotation}
%s
\\end{quotation}
''' % subcontent.strip()
        # ignore 'code' when inside pre tags
        # (mkdn produces <pre><code></code></pre>)
        # NOTE(review): both sides of this 'or' test for 'pre', so it reduces
        # to ournode.tag == 'pre'; etree elements also have no 'parentNode'
        # attribute — the second clause looks like leftover DOM-API code.
        # Confirm intent before simplifying.
        elif (ournode.tag == 'pre' or (ournode.tag == 'pre' and ournode.parentNode.tag != 'pre')):
            buffer += '''
\\begin{verbatim}
%s
\\end{verbatim}
''' % subcontent.strip()
        elif ournode.tag == 'q':
            buffer += "`%s'" % subcontent.strip()
        elif ournode.tag == 'p':
            # 'apply_br' config: turn soft newlines into forced line breaks
            if self.configs.get('apply_br'):
                subcontent = subcontent.replace('\n', '\\\\\\relax\n')
            buffer += '\n%s\n' % subcontent.strip()
        elif ournode.tag == 'strong':
            buffer += '\\textbf{%s}' % subcontent.strip()
        elif ournode.tag == 'em':
            buffer += '\\emph{%s}' % subcontent.strip()
        elif ournode.tag in ('table', 'thead', 'tbody', 'tr', 'th', 'td'):
            raise RuntimeError('Unexpected table in markdown data for LaTeX')
        elif ournode.tag == 'img':
            buffer += latex_render_image(ournode.get('src'), ournode.get('alt'), tmpdir=self.configs.get('tmpdir'))[0]
        elif ournode.tag == 'a':
            # this one gets escaped in convert_link_to_latex
            buffer += '<a href="{}">{}</a>'.format(ournode.get('href'), subcontent)
        else:
            buffer = subcontent
        if ournode.tail:
            buffer += escape_latex_entities(ournode.tail)
        return buffer
class UnescapeHtmlTextPostProcessor(markdown.postprocessors.Postprocessor):
    """Final pass turning basic HTML entities back into literal characters."""

    def run(self, text):
        unescaped = unescape_html_entities(text)
        return unescaped
# ========================= MATH =================================
class MathTextPostProcessor(markdown.postprocessors.Postprocessor):
    def run(self, instr):
        """
        Convert all math sections in {text} whether latex, asciimathml or
        latexmathml formatted to latex.

        This assumes you are using $$ as your mathematics delimiter (*not* the
        standard asciimathml or latexmathml delimiter).
        """
        def repl_1(matchobj):
            # Display math: wrap in \[...\] unless it is already wrapped in
            # \[ or a \begin{...} environment.
            text = unescape_latex_entities(matchobj.group(1))
            tmp = text.strip()
            if tmp.startswith('\\[') or tmp.startswith('\\begin'):
                return text
            else:
                return '\\[%s\\]\n' % text

        def repl_2(matchobj):
            # Inline math: keep $...$ and preserve the trailing whitespace
            # captured by group 2.
            text = unescape_latex_entities(matchobj.group(1))
            return f'${text}${matchobj.group(2)}'
        # $$ ..... $$  (a whole line of display math)
        pat = re.compile(r'^\$\$([^$]*)\$\$\s*$', re.MULTILINE)
        out = pat.sub(repl_1, instr)
        # Jones, $x=3$, is ...  (inline math followed by whitespace/EOL)
        pat3 = re.compile(r'\$([^$]+)\$(\s|$)')
        out = pat3.sub(repl_2, out)
        # # $100 million
        # pat2 = re.compile('([^\$])\$([^\$])')
        # out = pat2.sub('\g<1>\\$\g<2>', out)
        # some extras due to asciimathml
        # out = out.replace('\\lt', '<')
        # out = out.replace(' * ', ' \\cdot ')
        # out = out.replace('\\del', '\\partial')
        return out
# ========================== LINKS =================================
class LinkTextPostProcessor(markdown.postprocessors.Postprocessor):
    """Replace intermediate ``<a>`` markup with LaTeX href commands."""

    def run(self, instr):
        converted = []
        for block in instr.split('\n\n'):
            converted.append(re.sub(r'<a[^>]*>([^<]+)</a>',
                                    lambda m: convert_link_to_latex(m.group(0)).strip(),
                                    block))
        return '\n\n'.join(converted)
def convert_link_to_latex(instr):
    # Parse the serialized <a> fragment back into an element to extract
    # the href attribute and the link text.
    dom = html5parser.fragment_fromstring(instr)
    # NOTE(review): only the href is LaTeX-escaped here; dom.text is emitted
    # as-is — presumably it was already escaped upstream when the tree
    # processor built the anchor markup. Confirm.
    return '\\href{%s}{%s}' % (latex_escape(dom.get('href'), ignore_math=True), dom.text)
| indico/indico | indico/util/mdx_latex.py | Python | mit | 19,890 | [
"Bowtie"
] | 4b6194ef809428a867a9092edd3ea659f9ffe263eaad812b594e3d0aae729182 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@package Contavect
@brief **Main Class of Contavect program**. Contains The Main class and top level instructions
@copyright [GNU General Public License v2](http://www.gnu.org/licenses/gpl-2.0.html)
@author Adrien Leger - 2014
* <adrien.leger@gmail.com>
* <adrien.leger@inserm.fr>
* <adrien.leger@univ-nantes.fr>
* [Github](https://github.com/a-slide)
* [Atlantic Gene Therapies - INSERM 1089] (http://www.atlantic-gene-therapies.fr/)
"""
# TODO Raise flag if a facultative package is not properly imported
# IMPORTS
try:
# Standard library packages import
from os import path, remove # Mandatory package
from time import time # Mandatory package
import ConfigParser # Mandatory package
from sys import argv # Mandatory package
import csv # Mandatory package
# Third party packages
import pysam # Mandatory package
import Bio # Mandatory package
# Local Package import
from pyDNA.Utilities import mkdir, file_basename, file_name, expand_file, rm_blank, is_gziped # Mandatory package
from pyDNA.Blast import Blastn # if not imported = not ref masking
from pyDNA.RefMasker import mask # if not imported = not ref masking
from pyDNA.FastqFT.FastqFilter import FastqFilter # if not imported = not fasta filter
from pyDNA.FastqFT.QualityFilter import QualityFilter # if not imported = not fasta filter
from pyDNA.FastqFT.AdapterTrimmer import AdapterTrimmer # if not imported = not fasta filter
from pyDNA.Ssw import ssw_wrap # if not imported = not fasta filter
from pyDNA.Bwa import Mem # Mandatory package
from pyDNA.pySamTools import Bam, Coverage, Variant # if not imported = not requested output
from ContaVect_src.Reference import Reference, Sequence # Mandatory package
except ImportError as E:
print (E)
print ("Please verify your dependencies. See Readme for more informations\n")
exit()
#~~~~~~~MAIN FUNCTION~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
class Main(object):
    """
    Main class of the program. In a first time the class is initialize with values parsed from the
    configuration file. Then, the pipeline of analysis is launch through the 'run' method
    """
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    #~~~~~~~FONDAMENTAL METHODS~~~~~~~#

    def __init__ (self):
        """
        Parse command line argument and configuration file, then verify a limited number of
        critical values. Exits the interpreter with a message on any parsing error.
        """
        # TODO manage import flags, define default values and verify file existence
        # CL ARGUMENTS AND CONF FILE PARSING
        if len(argv) != 2:
            print ("Please provide the path to the Configuration file as an unique argument\n")
            exit()
        self.conf = ConfigParser.RawConfigParser(allow_no_value=True)
        self.conf.read(argv[1])
        if not self.conf.sections():
            print ("Empty or invalid configuration file. See Readme for more informations\n")
            exit()
        try:
            # Mandatory parameters
            self.outdir = rm_blank(self.conf.get("General", "outdir"), replace="_")
            if not self.outdir:
                self.outdir = "./"
            if self.outdir[-1] != "/":
                self.outdir += "/"
            self.outprefix = rm_blank(self.conf.get("General", "outprefix"), replace="_")
            # Bug fix: the original re-tested/assigned self.outdir here
            # (copy-paste error), so an empty outprefix never got its default.
            if not self.outprefix:
                self.outprefix = "out"
            self.ref_masking = self.conf.getboolean("Ref_Masking", "ref_masking")
            self.R1 = rm_blank(self.conf.get("Fastq", "R1"), replace="\ ")
            self.R2 = rm_blank(self.conf.get("Fastq", "R2"), replace="\ ")
            self.input_qual = self.conf.get("Fastq_Filtering", "input_qual")
            self.quality_filtering = self.conf.getboolean("Fastq_Filtering", "quality_filtering")
            self.adapter_trimming = self.conf.getboolean("Fastq_Filtering", "adapter_trimming")
            self.bwa_index = rm_blank(self.conf.get("Bwa_Alignment", "bwa_index"), replace="\ ")
            self.bwa_mem_opt = self.conf.get("Bwa_Alignment", "bwa_mem_opt")
            self.bwa_threads = self.conf.get("Bwa_Alignment", "bwa_threads")
            self.bwa_index_opt = self.conf.get("Bwa_Alignment", "bwa_index_opt")
            self.bwa_aligner = self.conf.get("Bwa_Alignment", "bwa_aligner")
            self.bwa_indexer = self.conf.get("Bwa_Alignment", "bwa_indexer")
            self.min_mapq = self.conf.getint("Output", "min_mapq")
            self.min_size = self.conf.getint("Output", "min_size")
            self.unmapped_bam = self.conf.getboolean("Output", "unmapped_bam")
            self.unmapped_sam = self.conf.getboolean("Output", "unmapped_sam")
            self.cov_min_depth = self.conf.getint("Output", "cov_min_depth")
            self.var_min_depth = self.conf.getint("Output", "var_min_depth")
            self.var_min_freq = self.conf.getfloat("Output", "var_min_freq")
            # Conditional parameters, only read when the matching feature is enabled
            if self.ref_masking:
                self.blastn_opt = self.conf.get("Ref_Masking", "blastn_opt")
                self.blastn_threads = self.conf.get("Ref_Masking", "blastn_threads")
                self.mkblastdb_opt = self.conf.get("Ref_Masking", "mkblastdb_opt")
                self.blastn = self.conf.get("Ref_Masking", "blastn")
                self.mkblastdb = self.conf.get("Ref_Masking", "mkblastdb")
            if self.quality_filtering:
                self.min_qual = self.conf.getint("Fastq_Filtering", "min_qual")
            if self.adapter_trimming:
                self.adapters = self.conf.get("Fastq_Filtering", "adapters").split()
                self.find_rc = self.conf.getboolean("Fastq_Filtering", "find_rc")
                self.min_read_len = self.conf.getfloat("Fastq_Filtering", "min_read_len")
                self.min_match_len = self.conf.getfloat("Fastq_Filtering", "min_match_len")
                self.min_match_score = self.conf.getfloat("Fastq_Filtering", "min_match_score")
                self.ssw_match = self.conf.getint("Fastq_Filtering", "ssw_match")
                self.ssw_mismatch = self.conf.getint("Fastq_Filtering", "ssw_mismatch")
                self.ssw_gap_open = self.conf.getint("Fastq_Filtering", "ssw_gap_open")
                self.ssw_gap_extend = self.conf.getint("Fastq_Filtering", "ssw_gap_extend")
            # More complicated import in a list of dictionaries for reference
            # informations: sections Ref1, Ref2, ... until one is missing.
            self.raw_ref_list = []
            for i in range(1, 100):
                ref_id = "Ref" + str(i)
                if not self.conf.has_section(ref_id):
                    break
                ref = {'name': rm_blank(self.conf.get(ref_id, "name"), replace="_"),
                       'fasta': rm_blank(self.conf.get(ref_id, "fasta"), replace="\ "),
                       'output': self.conf.get(ref_id, "output").split()}
                self.raw_ref_list.append(ref)
        except ConfigParser.NoOptionError as E:
            print (E)
            print ("An option is missing in the configuration file")
            print ("Please report to the descriptions in the configuration file\n")
            exit()
        except ConfigParser.NoSectionError as E:
            print (E)
            print ("An section is missing in the configuration file")
            print ("Please report to the descriptions in the configuration file\n")
            exit()
        except ValueError as E:
            print (E)
            print ("One of the value in the configuration file is not in the correct format")
            print ("Please report to the descriptions in the configuration file\n")
            exit()
def __repr__(self):
msg = "MAIN CLASS\n"
msg+= "\tParameters list\n"
for i, j in self.__dict__.items():
msg+="\t{}\t{}\n".format(i, j)
return (msg)
def __str__(self):
return "<Instance of {} from {} >\n".format(self.__class__.__name__, self.__module__)
def get(self, key):
return self.__dict__[key]
def set(self, key, value):
self.__dict__[key] = value
#~~~~~~~PUBLIC METHODS~~~~~~~#
def __call__(self):
"""
Launch the complete pipeline of analyse:
* Reference importation/parsing
* Facultative step of reference masking to remove homologies between reference sequences
* Facultative step of Fastq quality Filtering/ adapter trimming
* Facultative step of reference indexing for bwa from merged references
* Short read alignment with bwa mem
* Spliting of sam to attribute reads to each original references (or unmmapped)
* Output per reference bam, sam, bedgraph, bed, covgraph, variant call
* Output distribution table and graph
"""
stime = time()
self.outdir = mkdir(path.abspath(self.outdir))
print ("\n##### PARSE REFERENCES #####\n")
# Create CV_Reference.Reference object for each reference easily accessible through
# Reference class methods
if self.ref_masking or not self.bwa_index:
self.ref_dir = mkdir(path.join(self.outdir, "references/"))
self.index_dir = mkdir(path.join(self.outdir, "bwa_index/"))
self._extract_ref(expand=True)
else:
self.ref_dir = ""
self.index_dir = ""
self._extract_ref(expand=False)
# Reference Masking
if self.ref_masking:
print ("\n##### REFERENCE HOMOLOGIES MASKING #####\n")
self.db_dir = mkdir(path.join(self.outdir, "blast_db/"))
ref_list = self._iterative_masker()
# Erase existing index value if ref masking was performed
bwa_index = None
# Fastq Filtering
if self.quality_filtering or self.adapter_trimming:
print ("\n##### FASTQ FILTERING #####\n")
self.fastq_dir = mkdir(path.join(self.outdir, "fastq/"))
self.R1, self.R2 = self._fastq_filter()
# BWA alignment
print ("\n##### READ REFERENCES AND ALIGN WITH BWA #####\n")
# An index will be generated if no index was provided
self.result_dir = mkdir(path.join(self.outdir, "results/"))
self.sam = Mem.align (
self.R1, self.R2,
index = self.bwa_index,
ref = Reference.allFasta(),
align_opt = self.bwa_mem_opt,
index_opt = self.bwa_index_opt,
aligner = self.bwa_aligner,
align_threads = self.bwa_threads,
indexer = self.bwa_indexer,
align_outdir = self.result_dir,
index_outdir = self.index_dir,
align_outname = self.outprefix+".sam",
index_outname = self.outprefix+".idx")
print ("\n##### FILTER ALIGNED READS AND ASSIGN A REFERENCE #####\n")
# Split the output sam file according to each reference
self._sam_spliter ()
print ("\n##### GENERATE OUTPUT FOR EACH REFERENCE #####\n")
# Deal with garbage read dictionnary
self._garbage_output()
# Ask references to generate the output they were configured to
Reference.mk_output_global(self.result_dir+self.outprefix)
# Create a distribution table
self._distribution_output()
self._make_report()
print ("\n##### DONE #####\n")
print ("Total execution time = {}s".format(round(time()-stime, 2)))
    ##~~~~~~~PRIVATE METHODS~~~~~~~#

    def _extract_ref(self, expand=True):
        """
        Import and expand fasta references and associated flags in a Reference object
        expand the file if Gziped to avoid multiple compression/decompression during execution
        if require for next operations
        """
        for ref in self.raw_ref_list:
            # Expand fasta if needed
            if expand:
                ref_fasta = expand_file(infile=ref['fasta'], outdir=self.ref_dir)
            else:
                ref_fasta = ref['fasta']
            # Create a Reference object; each output maker is switched on/off
            # by the 'output' keywords listed for this reference in the config.
            Ref = Reference(
                name = ref['name'],
                ref_fasta = ref_fasta,
                bam_maker = Bam.BamMaker(
                    make_bam = 'bam' in ref['output'],
                    make_sam = 'sam' in ref['output']),
                cov_maker = Coverage.CoverageMaker(
                    min_depth=self.cov_min_depth,
                    make_bedgraph = 'bedgraph' in ref['output'],
                    make_bed = 'bed' in ref['output'],
                    make_covgraph = 'covgraph' in ref['output']),
                var_maker = Variant.VariantMaker(
                    min_depth=self.var_min_depth,
                    min_freq=self.var_min_freq,
                    make_freqvar = 'variant' in ref['output']))
            ## Test if all seq in ref are longer than 3000 for compatibility with bwa
            #for seq in Ref.seq_dict.values():
            #if seq.length < 3000:
            #import_and_pad (
            print (repr(Ref))
    def _iterative_masker (self): #### TODO: the function directly manipulates the reference field — change that
        """
        Mask references homologies iteratively, starting by the last reference which is masked by
        all the others then to the penultimate masked by all others except the last and and so
        forth until there is only 1 reference remaining
        """
        # Iterate over indices in Reference.Instances starting from the last one down to the 2nd
        for i in range(Reference.countInstances()-1, 0, -1):
            # Extract subject and query_list from ref_list
            subject = Reference.Instances[i]
            query_list = Reference.Instances[0:i]
            print ("\n# PROCESSING REFERENCE {} #\n".format(subject.name))
            # Perform a blast of query list against subject
            hit_list = Blastn.align (
                query_list = [ref.ref_fasta for ref in query_list],
                subject_fasta = subject.ref_fasta,
                align_opt = self.blastn_opt,
                num_threads = self.blastn_threads,
                db_opt = self.mkblastdb_opt,
                db_outdir = self.db_dir,
                db_outname = subject.name)
            # Mask hits in the subject fasta if any were found; the Reference
            # then points at the masked copy for all downstream steps.
            subject.ref_fasta = mask (
                subject_fasta= subject.ref_fasta,
                hit_list = hit_list,
                ref_outdir = self.ref_dir,
                ref_outname = "masked_{}.fa".format(subject.name),
                compress_ouput = False)
    def _fastq_filter (self):
        """
        Filter fastq with FastqFilterPP.

        Builds the optional quality filter and adapter trimmer from the
        configuration, runs FastqFilter over R1/R2, writes a report file and
        returns the paths of the filtered fastq pair.
        """
        # Define a quality filter object
        if self.quality_filtering:
            self.qFilter = QualityFilter (self.min_qual)
        else:
            self.qFilter = None
        # Define an adapter trimmer object
        if self.adapter_trimming:
            # Smith-Waterman aligner used to locate adapters within reads
            self.ssw_aligner = ssw_wrap.Aligner(
                match = self.ssw_match,
                mismatch = self.ssw_mismatch,
                gap_open = self.ssw_gap_open,
                gap_extend = self.ssw_gap_extend)
            self.trimmer = AdapterTrimmer(
                Aligner = self.ssw_aligner,
                adapters = self.adapters,
                find_rc = self.find_rc,
                min_read_len = self.min_read_len,
                min_match_len = self.min_match_len,
                min_match_score = self.min_match_score)
        else:
            self.trimmer = None
        # Filter fastq for quality and adapter with FastqFilter
        self.fFilter = FastqFilter (
            self.R1, self.R2,
            quality_filter = self.qFilter,
            adapter_trimmer = self.trimmer,
            outdir = self.fastq_dir,
            input_qual = self.input_qual,
            compress_output=False)
        # Print a simple result
        print ("Pairs processed: {}\t Pairs passed : {}\t in {} s".format(
            self.fFilter.getCTypeVal('total'),
            self.fFilter.getCTypeVal('total_pass'),
            self.fFilter.get('exec_time')))
        # Write a detailed report in a logfile
        output = "{}{}_FastqFilter_report.txt".format(self.fastq_dir, self.outprefix)
        with open (output, "wb") as outfile:
            outfile.write(repr(self.fFilter))
        return self.fFilter.getTrimmed()
    def _sam_spliter (self):
        """
        Stream the bwa sam output and assign each read either to its
        Reference or to a garbage category (Secondary / Unmapped / LowMapq),
        then delete the sam file which is no longer needed.
        """
        with pysam.Samfile(self.sam, "r" ) as samfile:
            self.bam_header = samfile.header
            # Give the header of the sam file to all Reference.Instances to respect the same order
            # references in sorted bam files
            Reference.set_global("bam_header", self.bam_header)
            # Create collectors for unmapped and low quality reads
            Secondary = Sequence (name = 'Secondary', length = 0)
            Unmapped = Sequence (name = 'Unmapped', length = 0)
            LowMapq = Sequence (name = 'LowMapq', length = 0)
            self.garbage_read = [Secondary, Unmapped, LowMapq]
            for read in samfile:
                # Always remove secondary alignments
                if read.is_secondary:
                    Secondary.add_read(read)
                # Filter Unmapped reads
                elif read.tid == -1:
                    Unmapped.add_read(read)
                # Filter Low MAPQ reads
                elif read.mapq < self.min_mapq:
                    LowMapq.add_read(read)
                # Filter short map ##### FOR FUTURE CREATE A SEPARATE CATEGORY
                elif len(read.query_alignment_sequence) < self.min_size:
                    Unmapped.add_read(read)
                # Finally if all is fine attribute the read to a Reference
                else:
                    Reference.addRead(samfile.getrname(read.tid), read)
        # Removing the original sam file which is no longer needed
        remove(self.sam)
        self.sam = None
    def _garbage_output (self):
        """
        Output bam /sam for garbage reads
        """
        # Define a generic Bam.BamMaker resulting in unsorted bam/sam for all garbage reads
        bam_maker = Bam.BamMaker(
            sort=False,
            make_index=False,
            make_bam = self.unmapped_bam,
            make_sam = self.unmapped_sam)
        for seq in self.garbage_read:
            # NOTE(review): Python 2 print statement — the rest of the file
            # uses the parenthesized form; harmonize when porting.
            print "Processing garbage reads :{}\tReads aligned :{} ".format(seq.name, seq.nread)
            bam_maker.make(
                header = self.bam_header,
                read_col = seq.read_list,
                outpath = self.result_dir+self.outprefix,
                ref_name = seq.name)
    def _distribution_output (self):
        """
        Write two tab-separated tables: read counts per Reference and per
        Sequence. RPKB = reads per kilobase of reference/sequence length.
        """
        output = "{}{}_Reference_distribution.csv".format(self.result_dir, self.outprefix)
        with open(output, 'wb') as csvfile:
            writer = csv.writer(csvfile, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
            # Table for all reference
            writer.writerow(["Ref name","length","nread","RPKB"])
            for ref in Reference.getInstances():
                writer.writerow([ref.name, len(ref), ref.nread, float(ref.nread)/len(ref)*1000])
            # Add a line for garbage reads excluding the secondary alignments
            nread = sum([seq.nread for seq in self.garbage_read[1:]])
            writer.writerow(["Unmaped_and LowMapq","NA",nread,"NA"])
        output = "{}{}_Sequence_distribution.csv".format(self.result_dir, self.outprefix)
        with open(output, 'wb') as csvfile:
            writer = csv.writer(csvfile, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
            # Table decomposing Sequence per Reference
            writer.writerow(["Seq name","length","nread","RPKB"])
            for ref in Reference.getInstances():
                for seq in ref.seq_dict.values():
                    writer.writerow([seq.name, len(seq), seq.nread, float(seq.nread)/len(seq)*1000])
            # Add lines for garbage reads including the secondary alignments
            for seq in self.garbage_read:
                writer.writerow([seq.name, "NA", seq.nread, "NA"])
    def _make_report (self):
        """
        Write a human-readable run-parameters report
        ({result_dir}{outprefix}_parameters.txt) summarizing the reference,
        fastq-filtering, bwa-alignment and output options used for this run.
        """
        output = "{}{}_parameters.txt".format(self.result_dir, self.outprefix)
        with open(output, 'wb') as outfile:
            # References options
            outfile.write("################## REFERENCES ##################\n\n")
            outfile.write(Reference.reprInstances())
            if self.ref_masking:
                outfile.write("Reference homologies were masked with RefMasker\n")
                outfile.write("blastn options : {}\n".format(self.blastn_opt))
                outfile.write("makeblastdb options : {}\n".format(self.mkblastdb_opt))
            else:
                outfile.write("No Reference homologies masking done\n")
            # Fastq options
            outfile.write("\n################## FASTQ FILES ##################\n\n")
            outfile.write("R1 : {}\n".format(self.R1))
            outfile.write("R2 : {}\n\n".format(self.R2))
            if self.quality_filtering or self.adapter_trimming:
                # fFilter is always reported when any filtering was enabled
                outfile.write(repr(self.fFilter)+"\n")
                if self.quality_filtering:
                    outfile.write(repr (self.qFilter)+"\n")
                if self.adapter_trimming:
                    outfile.write(repr (self.ssw_aligner)+"\n")
                    outfile.write(repr (self.trimmer)+"\n")
            else:
                outfile.write("\nNo Fastq Filtering done\n")
            # bwa alignment options
            outfile.write("\n################## BWA ALIGNMENT ##################\n\n")
            outfile.write("index file : {}\n".format(self.bwa_index))
            outfile.write("bwa index options: {}\n".format(self.bwa_index_opt))
            outfile.write("bwa mem option: {}\n".format(self.bwa_mem_opt))
            outfile.write("bwa threads : {}\n".format(self.bwa_threads))
            # Output Options
            outfile.write("\n################## OUTPUT ##################\n\n")
            outfile.write("Minimal MAPQ score : {}\n".format(self.min_mapq))
            outfile.write("Write garbage reads to sam: {}\n".format(str(self.unmapped_sam)))
            outfile.write("Write garbage reads to bam: {}\n".format(str(self.unmapped_bam)))
            outfile.write("Minimal depth for Coverage output : {}\n".format(self.cov_min_depth))
            outfile.write("Minimal depth for Variant output : {}\n".format(self.var_min_depth))
            outfile.write("Minimal Variant frequency : {}\n".format(self.var_min_freq))
#~~~~~~~TOP LEVEL INSTRUCTIONS~~~~~~~#
# Script entry point: instantiate the Main pipeline object and run it.
if __name__ == '__main__':
    main = Main()
    main()
| a-slide/ContaVect | ContaVect.py | Python | gpl-2.0 | 23,151 | [
"BLAST",
"BWA",
"pysam"
] | d5855a45a8d5e6f76bee430669031f255c8b997841985a651050ba47d32352a9 |
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Dict
from typing import Union
from ORCA.scripts.BaseScript import cBaseScript
from ORCA.utils.FileName import cFileName
import ORCA.Globals as Globals
class cToolsTemplate(cBaseScript):
    """Template base class for TOOLS scripts.

    Subclasses override :meth:`Register` / :meth:`UnRegister`;
    :meth:`RunScript` dispatches to them based on the call arguments.
    """
    def __init__(self):
        super().__init__()
        self.uType:str = u'TOOLS'                      # script category identifier
        self.iHash:int = 0                             # hash placeholder for subclasses
        self.oFnXML:Union[cFileName,None] = None       # resolved by Init()
        # self.aScriptSettingPlugins = []
    def Init(self,uObjectName:str,oFnObject:Union[cFileName,str]=None) -> None:
        """Initialise the script and resolve its script.xml definition file."""
        cBaseScript.Init(self,uObjectName,oFnObject)
        uScriptPath = Globals.oScripts.dScriptPathList[self.uObjectName]
        self.oFnXML = cFileName(uScriptPath)+"script.xml"
    def RunScript(self, *args, **kwargs) -> Union[Dict,None]:
        """Dispatch entry point: register or unregister the script."""
        bRegister:bool = 'register' in args or kwargs.get("caller")=="appstart"
        if bRegister:
            return self.Register(*args,**kwargs)
        if "unregister" in args:
            return self.UnRegister(*args,**kwargs)
        return None
    def Register(self,*args,**kwargs) -> None:
        """Registration hook; default implementation does nothing."""
        return None
    # noinspection PyMethodMayBeStatic,PyUnusedLocal
    def UnRegister(self,*args,**kwargs) -> None:
        """Unregistration hook; default implementation does nothing."""
        return None
| thica/ORCA-Remote | src/ORCA/scripttemplates/Template_Tools.py | Python | gpl-3.0 | 2,165 | [
"ORCA"
] | 47c15d9ac9cd64aeef3dc036c3a83ded5899ce637e3e8b7c5287f0de536ccc68 |
from func import *
# ATTTENTION! Maybe there are some mistakes in neuron parameters!
# --- NEST kernel setup and synapse-model registration (names such as nest,
# --- logging, generate_neurons and the *_synparams dicts come from func's
# --- star import — verify against func.py) ---
logger = logging.getLogger('neuromodulation')
startbuild = datetime.datetime.now()
nest.ResetKernel()
nest.SetKernelStatus({'overwrite_files': True,
                      'local_num_threads': 8,
                      'resolution': 0.1})
generate_neurons(100000)
# Init parameters of our synapse models
# Each neuromodulated synapse model needs its own volume transmitter node
DOPA_synparams_ex['vt'] = nest.Create('volume_transmitter')[0]
DOPA_synparams_in['vt'] = nest.Create('volume_transmitter')[0]
SERO_synparams_in['vt'] = nest.Create('volume_transmitter')[0]
SERO_synparams_ex['vt'] = nest.Create('volume_transmitter')[0]
NORA_synparams_ex['vt'] = nest.Create('volume_transmitter')[0]
# Register one named synapse model per transmitter type / direction
nest.CopyModel('static_synapse', gen_static_syn, static_syn)
nest.CopyModel('stdp_synapse', glu_synapse, STDP_synparams_Glu)
nest.CopyModel('stdp_synapse', gaba_synapse, STDP_synparams_GABA)
nest.CopyModel('stdp_synapse', ach_synapse, STDP_synparams_ACh)
nest.CopyModel('stdp_dopamine_synapse', dopa_synapse_ex, DOPA_synparams_ex)
nest.CopyModel('stdp_dopamine_synapse', dopa_synapse_in, DOPA_synparams_in)
nest.CopyModel('stdp_serotonin_synapse', sero_synapse_ex, SERO_synparams_ex)
nest.CopyModel('stdp_serotonin_synapse', sero_synapse_in, SERO_synparams_in)
nest.CopyModel('stdp_noradrenaline_synapse', nora_synapse_ex, NORA_synparams_ex)
## - my .50
logger.debug("* * * Start connection initialisation")
####################################################################
# --- Static connectivity between brain-region populations.
# --- connect() and all population/index names come from func's star import.
# * * * ventral pathway * * *
connect(ldt[ldt_Ach],thalamus[thalamus_Glu], syn_type=ACh, weight_coef=5)
connect(ldt[ldt_Ach], bnst[bnst_Ach], syn_type=ACh, weight_coef=0.005)
connect(ldt[ldt_Ach], lc[lc_N0], syn_type=ACh, weight_coef=0.005)
connect(ldt[ldt_Ach], prefrontal[pfc_Glu0], syn_type=ACh, weight_coef=0.5)
connect(thalamus[thalamus_Glu], motor[motor_Glu0], syn_type=Glu, weight_coef=0.005)
connect(thalamus[thalamus_Glu], motor[motor_Glu1], syn_type=Glu, weight_coef=0.005)
connect(thalamus[thalamus_Glu], motor[motor_5HT], syn_type=Glu, weight_coef=0.005)
connect(motor[motor_Glu0], lc[lc_N0], syn_type=Glu, weight_coef=0.005)
connect(motor[motor_Glu1], lc[lc_N0], syn_type=Glu, weight_coef=0.005)
connect(prefrontal[pfc_Glu0], lc[lc_N0], syn_type=Glu, weight_coef=0.005)
connect(prefrontal[pfc_Glu1], bnst[bnst_Glu], syn_type=Glu, weight_coef=0.005)
connect(bnst[bnst_Glu], bnst[bnst_GABA], syn_type=Glu, weight_coef=0.005)
connect(bnst[bnst_Ach], amygdala[amygdala_Ach], syn_type=ACh, weight_coef=0.005)
connect(bnst[bnst_GABA], hypothalamus[hypothalamus_pvn_GABA], syn_type=GABA, weight_coef=0.0005)
connect(amygdala[amygdala_Ach], lc[lc_Ach], syn_type=ACh, weight_coef=0.005)
connect(amygdala[amygdala_GABA], bnst[bnst_GABA], syn_type=GABA, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_D1], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_D2], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_tan], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_5HT], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_Ach], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_GABA], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_GABA1], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_GABA0], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_5HT], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_NA], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_Ach], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_DA], syn_type=Glu, weight_coef=0.005)
connect(hypothalamus[hypothalamus_pvn_GABA], motor[motor_Glu0], syn_type=GABA, weight_coef=0.5)
connect(hypothalamus[hypothalamus_pvn_GABA], motor[motor_Glu1], syn_type=GABA, weight_coef=0.5)
connect(hypothalamus[hypothalamus_pvn_GABA], motor[motor_5HT], syn_type=GABA, weight_coef=0.5)
#inside LC
connect(lc[lc_Ach], lc[lc_GABA], syn_type=ACh, weight_coef=0.005)
connect(lc[lc_Ach], lc[lc_N0], syn_type=ACh, weight_coef=0.005)
connect(lc[lc_Ach], lc[lc_N1], syn_type=ACh, weight_coef=0.005)
connect(lc[lc_D1], lc[lc_N0], syn_type=DA_ex, weight_coef=0.005)
connect(lc[lc_D2], lc[lc_N1], syn_type=DA_in, weight_coef=0.005)
connect(lc[lc_GABA], lc[lc_N0], syn_type=GABA, weight_coef=0.005)
#* * * dorsal pathway * * *
connect(pgi[pgi_Glu], lc[lc_N0], syn_type=Glu, weight_coef=0.005)
connect(pgi[pgi_Glu], lc[lc_N1], syn_type=Glu, weight_coef=0.005)
connect(pgi[pgi_GABA], lc[lc_GABA], syn_type=GABA, weight_coef=0.005)
connect(prh[prh_GABA], lc[lc_GABA], syn_type=GABA, weight_coef=0.005)
connect(striatum[striatum_tan], lc[lc_GABA], syn_type=GABA, weight_coef=0.005)
connect(vta[vta_DA0], lc[lc_D1], syn_type=DA_ex, weight_coef=0.005)
connect(vta[vta_DA0], lc[lc_D2], syn_type=DA_in, weight_coef=0.005)
connect(vta[vta_DA1], striatum[striatum_tan], syn_type=DA_ex, weight_coef=0.005)
connect(vta[vta_DA1], striatum[striatum_GABA], syn_type=DA_ex, weight_coef=0.005)
# Default weight coefficients reused by the serotonin section below
# (wse = serotonin excitatory, wsi = serotonin inhibitory)
wse = 0.1
wsi = 0.0001
#
# * * * NIGROSTRIATAL PATHWAY* * *
connect(motor[motor_Glu0], striatum[striatum_D1], syn_type=Glu, weight_coef=0.005)
connect(motor[motor_Glu0], snc[snc_DA], syn_type=Glu, weight_coef=0.005)
connect(motor[motor_Glu0], striatum[striatum_D2], syn_type=Glu, weight_coef=0.05)
# NOTE(review): motor_Glu0 -> thalamus_Glu is connected again below with
# weight_coef=5; confirm the duplicate is intentional
connect(motor[motor_Glu0], thalamus[thalamus_Glu], syn_type=Glu, weight_coef=3) #0.0008
connect(motor[motor_Glu0], prefrontal[pfc_5HT], syn_type=Glu, weight_coef=0.3) ######not in the diagram
connect(motor[motor_Glu0], motor[motor_5HT], syn_type=Glu, weight_coef=0.003) ######not in the diagram
connect(motor[motor_Glu0], stn[stn_Glu], syn_type=Glu, weight_coef=7)
connect(motor[motor_Glu1], striatum[striatum_D1], syn_type=Glu)
connect(motor[motor_Glu1], striatum[striatum_D2], syn_type=Glu)
connect(motor[motor_Glu0], thalamus[thalamus_Glu], syn_type=Glu,weight_coef=5)
connect(motor[motor_Glu1], stn[stn_Glu], syn_type=Glu)
connect(motor[motor_Glu1], nac[nac_GABA0], syn_type=GABA)
connect(striatum[striatum_tan], striatum[striatum_D1], syn_type=GABA)
connect(striatum[striatum_tan], striatum[striatum_D2], syn_type=Glu)
connect(striatum[striatum_D1], snr[snr_GABA], syn_type=GABA, weight_coef=0.001)
connect(striatum[striatum_D1], gpi[gpi_GABA], syn_type=GABA, weight_coef=0.001)
connect(striatum[striatum_D1], gpe[gpe_GABA], syn_type=GABA, weight_coef=0.005)
connect(striatum[striatum_D2], gpe[gpe_GABA], syn_type=GABA, weight_coef=1)
connect(gpe[gpe_GABA], stn[stn_Glu], syn_type=GABA, weight_coef=0.0001)
connect(gpe[gpe_GABA], striatum[striatum_D1], syn_type=GABA, weight_coef=0.001)
connect(gpe[gpe_GABA], striatum[striatum_D2], syn_type=GABA, weight_coef=0.3)
connect(gpe[gpe_GABA], gpi[gpi_GABA], syn_type=GABA, weight_coef=0.0001)
connect(gpe[gpe_GABA], snr[snr_GABA], syn_type=GABA, weight_coef=0.0001)
connect(stn[stn_Glu], snr[snr_GABA], syn_type=Glu, weight_coef=0.2)
connect(stn[stn_Glu], gpi[gpi_GABA], syn_type=Glu, weight_coef=0.2)
connect(stn[stn_Glu], gpe[gpe_GABA], syn_type=Glu, weight_coef=0.3)
connect(stn[stn_Glu], snc[snc_DA], syn_type=Glu, weight_coef=0.01)
connect(gpi[gpi_GABA], thalamus[thalamus_Glu], syn_type=GABA, weight_coef=0.0001) # weight_coef=3)
connect(snr[snr_GABA], thalamus[thalamus_Glu], syn_type=GABA, weight_coef=0.0001) # weight_coef=3)
connect(thalamus[thalamus_Glu], motor[motor_Glu1], syn_type=Glu)
connect(thalamus[thalamus_Glu], stn[stn_Glu], syn_type=Glu, weight_coef=1) #005
connect(thalamus[thalamus_Glu], striatum[striatum_D1], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], striatum[striatum_D2], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], striatum[striatum_tan], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], striatum[striatum_Ach], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], striatum[striatum_GABA], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], striatum[striatum_5HT], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], nac[nac_GABA0], syn_type=Glu)
connect(thalamus[thalamus_Glu], nac[nac_GABA1], syn_type=Glu)
connect(thalamus[thalamus_Glu], nac[nac_Ach], syn_type=Glu)
connect(thalamus[thalamus_Glu], nac[nac_DA], syn_type=Glu)
connect(thalamus[thalamus_Glu], nac[nac_5HT], syn_type=Glu)
connect(thalamus[thalamus_Glu], nac[nac_NA], syn_type=Glu)
# * * * INTEGRATED PATHWAY * * *
connect(prefrontal[pfc_Glu0], vta[vta_DA0], syn_type=Glu)
connect(prefrontal[pfc_Glu0], nac[nac_GABA1], syn_type=Glu)
connect(prefrontal[pfc_Glu1], vta[vta_GABA2], syn_type=Glu)
connect(prefrontal[pfc_Glu1], nac[nac_GABA1], syn_type=Glu)
# NOTE(review): amygdala_Glu -> nac and -> striatum were already connected in
# the ventral-pathway section above with different weights; confirm the
# duplicates are intentional (NEST will create additional synapses)
connect(amygdala[amygdala_Glu], nac[nac_GABA0], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_GABA1], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_Ach], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_DA], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_5HT], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_NA], syn_type=Glu)
connect(amygdala[amygdala_Glu], striatum[striatum_D1], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[striatum_D2], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[striatum_tan], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[striatum_Ach], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[striatum_5HT], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[striatum_GABA], syn_type=Glu, weight_coef=0.3)
# * * * MESOCORTICOLIMBIC PATHWAY * * *
connect(nac[nac_Ach], nac[nac_GABA1], syn_type=ACh)
connect(nac[nac_GABA0], nac[nac_GABA1],syn_type=GABA,)
connect(nac[nac_GABA1], vta[vta_GABA2],syn_type=GABA,)
connect(vta[vta_GABA0], prefrontal[pfc_Glu0],syn_type=GABA,weight_coef=0.0005)
connect(vta[vta_GABA0], pptg[pptg_GABA],syn_type=GABA,)
connect(vta[vta_GABA1], vta[vta_DA0],syn_type=GABA,)
connect(vta[vta_GABA1], vta[vta_DA1],syn_type=GABA,)
connect(vta[vta_GABA2], nac[nac_GABA1],syn_type=GABA,)
connect(pptg[pptg_GABA], vta[vta_GABA0],syn_type=GABA,)
connect(pptg[pptg_GABA], snc[snc_GABA], syn_type=GABA,weight_coef=0.005)
connect(pptg[pptg_ACh], vta[vta_GABA0], syn_type=ACh)
connect(pptg[pptg_ACh], vta[vta_DA1], syn_type=ACh)
connect(pptg[pptg_Glu], vta[vta_GABA0], syn_type=Glu)
connect(pptg[pptg_Glu], vta[vta_DA1], syn_type=Glu)
connect(pptg[pptg_ACh], striatum[striatum_D1], syn_type=ACh, weight_coef=0.3)
connect(pptg[pptg_ACh], snc[snc_GABA], syn_type=ACh, weight_coef=0.005)
connect(pptg[pptg_Glu], snc[snc_DA], syn_type=Glu, weight_coef=0.005)
# --- Neuromodulator-specific connections, each gated by a flag defined in
# --- func (noradrenaline_flag / serotonin_flag / dopamine_flag) ---
if noradrenaline_flag:
    logger.debug("* * * Making neuromodulating connections...")
    #vt_ex = nest.Create('volume_transmitter')
    #vt_in = nest.Create('volume_transmitter')
    #NORA_synparams_ex['vt'] = vt_ex[0]
    #NORA_synparams_in['vt'] = vt_in[0]
    connect(nts[nts_a1], lc[lc_N0], syn_type=NA_ex, weight_coef=0.005)
    connect(nts[nts_a1], bnst[bnst_Glu], syn_type=NA_ex, weight_coef=0.005)
    connect(nts[nts_a2], lc[lc_N1], syn_type=NA_ex, weight_coef=0.005)
    connect(nts[nts_a2], striatum[striatum_tan], syn_type=NA_ex, weight_coef=0.005)
    connect(nts[nts_a2], striatum[striatum_GABA], syn_type=NA_ex, weight_coef=0.005)
    connect(nts[nts_a2], amygdala[amygdala_Glu], syn_type=NA_ex, weight_coef=0.005)
    connect(nts[nts_a2], amygdala[amygdala_Ach], syn_type=NA_ex, weight_coef=0.005)
    connect(nts[nts_a2], amygdala[amygdala_GABA], syn_type=NA_ex, weight_coef=0.005)
    connect(nts[nts_a2], bnst[bnst_Glu], syn_type=NA_ex, weight_coef=0.005)
    connect(lc[lc_N0], motor[motor_Glu0], syn_type=NA_ex, weight_coef=0.005)
    connect(lc[lc_N0], motor[motor_Glu1], syn_type=NA_ex, weight_coef=0.005)
    connect(lc[lc_N0], prefrontal[pfc_Glu1], syn_type=NA_ex, weight_coef=0.5)
    connect(lc[lc_N0], vta[vta_a1], syn_type=NA_ex, weight_coef=0.005)
    connect(lc[lc_N0], ldt[ldt_a1], syn_type=NA_ex, weight_coef=0.005)
    connect(lc[lc_N0], ldt[ldt_a2], syn_type=NA_ex, weight_coef=0.005)
    connect(lc[lc_N1], striatum[striatum_tan], syn_type=NA_ex, weight_coef=0.005)
    connect(lc[lc_N1], striatum[striatum_GABA], syn_type=NA_ex, weight_coef=0.005)
    connect(lc[lc_N1], rn[rn_a1], syn_type=NA_ex, weight_coef=0.005)
    connect(lc[lc_N1], rn[rn_a2], syn_type=NA_ex, weight_coef=0.005)
    connect(rn[rn_a1], rn[rn_dr], syn_type=NA_ex, weight_coef=0.005)
    connect(rn[rn_a2], rn[rn_mnr], syn_type=NA_ex, weight_coef=0.005)
    connect(rn[rn_a2], rn[rn_rpa], syn_type=NA_ex, weight_coef=0.005)
    connect(rn[rn_a2], rn[rn_rmg], syn_type=NA_ex, weight_coef=0.005)
    #connect(vta[vta_a1], vta[vta_DA1], syn_type=NA_in, weight_coef=0.005)
if serotonin_flag:
    # wse / wsi (defined above) are the serotonergic excitatory / inhibitory
    # weight coefficients
    # * * * AFFERENT PROJECTIONS * *
    connect(vta[vta_5HT], rn[rn_dr], syn_type=SERO_ex, weight_coef=wse)
    connect(septum[septum_5HT], rn[rn_dr], syn_type=SERO_ex, weight_coef=wse)
    connect(septum[septum_5HT], rn[rn_mnr], syn_type=SERO_ex, weight_coef=wse)
    connect(prefrontal[pfc_5HT], rn[rn_dr], syn_type=SERO_ex, weight_coef=wse)
    connect(prefrontal[pfc_5HT], rn[rn_mnr], syn_type=SERO_ex, weight_coef=wse)
    connect(hypothalamus[hypothalamus_5HT], rn[rn_rmg], syn_type=SERO_ex, weight_coef=wse)
    connect(hypothalamus[hypothalamus_5HT], rn[rn_rpa], syn_type=SERO_ex, weight_coef=wse)
    connect(periaqueductal_gray[periaqueductal_gray_5HT], rn[rn_rmg], syn_type=SERO_ex, weight_coef=wse)
    connect(periaqueductal_gray[periaqueductal_gray_5HT], rn[rn_rpa], syn_type=SERO_ex, weight_coef=wse)
    connect(bnst[bnst_5HT], rn[rn_rpa], syn_type=SERO_ex, weight_coef=wse)
    connect(amygdala[amygdala_5HT], rn[rn_rpa], syn_type=SERO_ex, weight_coef=wse)
    connect(amygdala[amygdala_5HT], rn[rn_rmg], syn_type=SERO_ex, weight_coef=wse)
    connect(hippocampus[hippocampus_5HT], rn[rn_dr], syn_type=SERO_ex, weight_coef=wse)
    # * * * EFFERENT PROJECTIONS * * *
    connect(rn[rn_dr], striatum[striatum_5HT], syn_type=SERO_in, weight_coef=wsi) #!!!
    connect(rn[rn_dr], striatum[striatum_D2], syn_type=SERO_in, weight_coef=wsi) #!!!
    connect(rn[rn_dr], striatum[striatum_GABA], syn_type=SERO_in, weight_coef=wsi) #!!!
    connect(rn[rn_dr], striatum[striatum_Ach], syn_type=SERO_in, weight_coef=wsi) #!!!
    connect(rn[rn_dr], nac[nac_5HT], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], nac[nac_GABA0], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], nac[nac_GABA1], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], nac[nac_Ach], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], nac[nac_DA], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], snr[snr_GABA], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], septum[septum_5HT], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], thalamus[thalamus_5HT], syn_type=SERO_in, weight_coef=wsi) #? tune weights
    connect(rn[rn_dr], thalamus[thalamus_Glu], syn_type=SERO_in, weight_coef=wsi) #? tune weights
    connect(rn[rn_dr], lateral_cortex[lateral_cortex_5HT], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], entorhinal_cortex[entorhinal_cortex_5HT], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], prefrontal[pfc_5HT], syn_type=SERO_in, weight_coef=wsi) #!!!
    connect(rn[rn_dr], prefrontal[pfc_Glu0], syn_type=SERO_in, weight_coef=wsi) #!!!
    connect(rn[rn_dr], prefrontal[pfc_Glu1], syn_type=SERO_in, weight_coef=wsi) #!!!
    connect(rn[rn_dr], prefrontal[pfc_DA], syn_type=SERO_in, weight_coef=wsi) #!!!
    connect(rn[rn_dr], prefrontal[pfc_NA], syn_type=SERO_in, weight_coef=wsi) #!!!
    connect(rn[rn_dr], lateral_tegmental_area[lateral_tegmental_area_5HT], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], lc[lc_5HT], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], lc[lc_N0], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], lc[lc_N1], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], bnst[bnst_5HT], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], bnst[bnst_Glu], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], bnst[bnst_GABA], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], bnst[bnst_Ach], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], hippocampus[hippocampus_5HT], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], amygdala[amygdala_5HT], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], amygdala[amygdala_Glu], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], amygdala[amygdala_GABA], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_dr], amygdala[amygdala_Ach], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_mnr], vta[vta_5HT], syn_type=SERO_in, weight_coef=wsi) #!!! 0.005
    connect(rn[rn_mnr], vta[vta_a1], syn_type=SERO_in, weight_coef=wsi) #!!! 0.005
    connect(rn[rn_mnr], vta[vta_DA1], syn_type=SERO_in, weight_coef=wsi) #!!! 0.005
    connect(rn[rn_mnr], thalamus[thalamus_5HT], syn_type=SERO_in, weight_coef=wsi) #?
    connect(rn[rn_mnr], thalamus[thalamus_Glu], syn_type=SERO_in, weight_coef=wsi) #? tune weights 0.005
    connect(rn[rn_mnr], prefrontal[pfc_5HT], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_mnr], prefrontal[pfc_Glu0], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_mnr], prefrontal[pfc_Glu1], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_mnr], motor[motor_Glu0], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_mnr], motor[motor_5HT], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_mnr], insular_cortex[insular_cortex_5HT], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_mnr], medial_cortex[medial_cortex_5HT], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_mnr], neocortex[neocortex_5HT], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_mnr], hypothalamus[hypothalamus_5HT], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_mnr], hypothalamus[hypothalamus_pvn_GABA], syn_type=SERO_in, weight_coef=wsi)
    connect(rn[rn_mnr], hippocampus[hippocampus_5HT], syn_type=SERO_in, weight_coef=wsi)
    # * * * THALAMOCORTICAL PATHWAY * * *
    connect(thalamus[thalamus_5HT], prefrontal[pfc_5HT], syn_type=SERO_in, weight_coef=wse)
    connect(thalamus[thalamus_5HT], motor[motor_5HT], syn_type=SERO_ex, weight_coef=wse)
    connect(thalamus[thalamus_5HT], motor[motor_Glu0], syn_type=SERO_ex, weight_coef=wse)
    connect(prefrontal[pfc_5HT], thalamus[thalamus_5HT], syn_type=SERO_in, weight_coef=wsi) # main was 0.005
    connect(motor[motor_5HT], thalamus[thalamus_5HT], syn_type=SERO_in, weight_coef=wsi) # main was 0.005
if dopamine_flag:
    logger.debug("* * * Making neuromodulating connections...")
    # NIGROSTRIATAL
    # connect(snc[snc_DA], striatum[striatum_D1], syn_type=DA_ex)
    connect(snc[snc_DA], gpe[gpe_GABA], syn_type=DA_ex)
    connect(snc[snc_DA], stn[stn_Glu], syn_type=DA_ex)
    connect(snc[snc_DA], nac[nac_GABA0], syn_type=DA_ex)
    connect(snc[snc_DA], nac[nac_GABA1], syn_type=DA_ex)
    connect(snc[snc_DA], striatum[striatum_D2], syn_type=DA_in)
    connect(snc[snc_DA], striatum[striatum_tan], syn_type=DA_in)
    # MESOCORTICOLIMBIC
    connect(vta[vta_DA0], striatum[striatum_D1], syn_type=DA_ex)
    connect(vta[vta_DA0], striatum[striatum_D2], syn_type=DA_in)
    connect(vta[vta_DA0], prefrontal[pfc_Glu0], syn_type=DA_ex,weight_coef=0.5)
    connect(vta[vta_DA0], prefrontal[pfc_Glu1], syn_type=DA_ex,weight_coef=0.5)
    connect(vta[vta_DA1], nac[nac_GABA0], syn_type=DA_ex)
    connect(vta[vta_DA1], nac[nac_GABA1], syn_type=DA_ex)
# Cross-talk between the three neuromodulator systems; only wired when all
# three systems are enabled
if dopamine_flag and serotonin_flag and noradrenaline_flag:
    # * * * DOPAMINE INTERACTION * * *
    connect(prefrontal[pfc_5HT], prefrontal[pfc_DA], syn_type=SERO_ex, weight_coef=wse)
    connect(prefrontal[pfc_DA], vta[vta_5HT], syn_type=DA_in, weight_coef=0.005)
    connect(prefrontal[pfc_DA], vta[vta_DA1], syn_type=DA_in, weight_coef=0.005)
    #connect(vta[vta_5HT], vta[vta_DA1], syn_type=SERO_in, weight_coef=0.005)
    connect(vta[vta_5HT], vta[vta_DA1], syn_type=SERO_ex, weight_coef=wse)
    connect(vta[vta_DA1], prefrontal[pfc_5HT], syn_type=DA_ex, weight_coef=0.5)
    connect(vta[vta_DA1], prefrontal[pfc_DA], syn_type=DA_ex, weight_coef=0.5)
    #connect(vta[vta_DA1], striatum[striatum_5HT], syn_type=DOPA_in, weight_coef=0.005)
    connect(vta[vta_DA1], striatum[striatum_5HT], syn_type=DA_ex, weight_coef=0.005)
    #connect(vta[vta_DA1], striatum[striatum_DA], syn_type=DOPA_in, weight_coef=0.005)
    connect(vta[vta_DA1], striatum[striatum_D1], syn_type=DA_ex, weight_coef=0.005)
    #connect(vta[vta_DA1], nac[nac_5HT], syn_type=DOPA_in, weight_coef=0.005)
    connect(vta[vta_DA1], nac[nac_5HT], syn_type=DA_ex, weight_coef=0.005)
    #connect(vta[vta_DA1], nac[nac_DA], syn_type=DOPA_in, weight_coef=0.005)
    connect(vta[vta_DA1], nac[nac_DA], syn_type=DA_ex, weight_coef=0.005)
    #connect(striatum[striatum_5HT], striatum[striatum_DA], syn_type=SERO_in, weight_coef=0.005)
    connect(striatum[striatum_5HT], striatum[striatum_D1], syn_type=SERO_ex, weight_coef=wse) #??????????????????????????????????? D1, D2?
    #connect(striatum[striatum_DA], snr[snr_GABA], syn_type=DOPA_in, weight_coef=0.005)
    connect(striatum[striatum_D1], snr[snr_GABA], syn_type=DA_ex, weight_coef=0.005)
    #connect(striatum[striatum_DA], snc[snc_DA], syn_type=DOPA_in, weight_coef=0.005)
    # connect(striatum[striatum_D1], snc[snc_GABA], syn_type=DA_ex, weight_coef=0.005)
    # connect(striatum[striatum_D1], snc[snc_DA], syn_type=DA_ex, weight_coef=0.005)
    connect(nac[nac_5HT], nac[nac_DA], syn_type=SERO_ex, weight_coef=wse)
    connect(snr[snr_GABA], snc[snc_DA], syn_type=SERO_in, weight_coef=wsi)
    connect(snc[snc_GABA], striatum[striatum_5HT], syn_type=DA_in, weight_coef=0.005) #?
    connect(snc[snc_DA], striatum[striatum_5HT], syn_type=DA_in, weight_coef=0.005)
    connect(snc[snc_DA], striatum[striatum_D1], syn_type=DA_in, weight_coef=0.005)
    connect(snc[snc_DA], nac[nac_5HT], syn_type=DA_in, weight_coef=0.005)
    connect(snc[snc_DA], nac[nac_DA], syn_type=DA_in, weight_coef=0.005)
    connect(lc[lc_5HT], lc[lc_D1], syn_type=SERO_ex, weight_coef=0.005)
    connect(lc[lc_D1], rn[rn_dr], syn_type=DA_ex, weight_coef=0.005)
    # * * * NORADRENALINE INTERACTION * * *
    connect(lc[lc_5HT], lc[lc_N0], syn_type=SERO_in, weight_coef=0.005)
    connect(lc[lc_5HT], lc[lc_N1], syn_type=SERO_in, weight_coef=0.005)
logger.debug("* * * Attaching spike generators...")
# --- Simple TD-learning schedule: for each reward, compute a TD error and
# --- drive the raphe (serotonin) and dopamine populations with spike
# --- generators whose weights scale with gamma and delta respectively.
rewards = [0.5, 0.3, 0.8, 0.4, 1]
state_values = [0., 0., 0.]   # one value per abstract state (0: worse, 1: neutral, 2: better)
prev_index = 1
current_index = 1
delta, gamma = 0, 0
d, g = [], []                 # histories of delta / gamma for the final report
learning_rate = 1.
c = 0.5                       # gamma update gain
k = 100.                      # start time (ms) of the first stimulation window
for reward in rewards:
    # calculate TD-error
    delta = reward + gamma * state_values[current_index] - state_values[prev_index]
    d.append(delta)
    gamma += delta * c # update gamma value, clipped to 1 below
    if gamma > 1:
        gamma = 1
    g.append(gamma)
    state_values[current_index] += learning_rate * delta # update current value
    prev_index = current_index
    # Pick the next state depending on the sign of the TD error
    if delta > 0:
        current_index = 2
    elif delta < 0:
        # delta = 0.
        current_index = 0
    else:
        current_index = 1
    # 12 ms stimulation window per reward; serotonergic drive scales with
    # gamma, dopaminergic drive with delta
    connect_generator(rn[rn_dr], k, k + 12., rate=250, coef_part=1, weight=10 * gamma)
    connect_generator(rn[rn_mnr], k, k + 12., rate=250, coef_part=1, weight=10 * gamma)
    # connect_generator(motor[motor_Glu0], k, k + 12., rate=250, coef_part=1)
    connect_generator(vta[vta_DA0], k, k + 12., rate=250, coef_part=1, weight=15 * delta)
    connect_generator(snc[snc_DA], k, k + 12., rate=250, coef_part=1, weight=15 * delta)
    k += 100.
logger.debug("* * * Attaching spikes detector")
for part in getAllParts():
    connect_detector(part)
logger.debug("* * * Attaching multimeters")
for part in getAllParts():
    connect_multimeter(part)
# Drop the builder helpers before simulating (they are no longer needed)
del generate_neurons, connect, connect_generator, connect_detector, connect_multimeter
endbuild = datetime.datetime.now()
simulate()
get_log(startbuild, endbuild)
save(GUI=status_gui)
# Python 2 print statement (this script is Python 2 code)
print "delta=%s, gamma=%s" % (str(d), str(g))
| research-team/NEUCOGAR | NEST/appraisal/scripts/neuromodulation.py | Python | gpl-2.0 | 24,062 | [
"NEURON"
] | aa7c6d8228d516d72198f10b203bcf1bb2de02553c93fb1da02765f77e832333 |
##############################################################################
# MSIBI: A package for optimizing coarse-grained force fields using multistate
# iterative Boltzmann inversion.
# Copyright (c) 2017 Vanderbilt University and the Authors
#
# Authors: Christoph Klein, Timothy C. Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files, to deal
# in MSIBI without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# # copies of MSIBI, and to permit persons to whom MSIBI is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of MSIBI.
#
# MSIBI IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH MSIBI OR THE USE OR OTHER DEALINGS ALONG WITH
# MSIBI.
#
# You should have received a copy of the MIT license.
# If not, see <https://opensource.org/licenses/MIT/>.
##############################################################################
import glob
import os
from pkg_resources import resource_filename
import shutil
import numpy as np
def get_fn(name):
    """Get the full path to one of the reference files shipped for testing.

    This function is taken straight from MDTraj (see https://github.com/mdtraj/mdtraj).
    In the source distribution, these files are in ``msibi/utils/reference``,
    but on installation, they're moved to somewhere in the user's python
    site-packages directory.

    Parameters
    ----------
    name : str
        Name of the file to load (relative to the reference/ directory).

    Examples
    ________
    >>> import mdtraj as md
    >>> t = md.load(get_fn('final.hoomdxml'))
    """
    relative = os.path.join('utils', 'reference', name)
    path = resource_filename('msibi', relative)
    if os.path.exists(path):
        return path
    # Fail loudly when the packaged file is missing (e.g. stale install).
    raise ValueError('Sorry! %s does not exist. If you just '
                     'added it, you\'ll have to re install' % path)
def find_nearest(array, target):
    """Return ``(index, value)`` of the element of *array* closest to *target*."""
    distances = np.abs(array - target)
    nearest_idx = distances.argmin()
    return nearest_idx, array[nearest_idx]
def _count_backups(filename):
"""Count the number of backups of a file in a directory. """
head, tail = os.path.split(filename)
backup_files = ''.join(['_.*.', tail])
return len(glob.glob(os.path.join(head, backup_files)))
def _backup_name(filename, n_backups):
"""Return backup filename based on the number of existing backups.
Parameters
----------
filename : str
Full path to file to make backup of.
n_backups : int
Number of existing backups.
"""
head, tail = os.path.split(filename)
new_backup = ''.join(['_.{0:d}.'.format(n_backups), tail])
return os.path.join(head, new_backup)
def backup_file(filename):
    """Copy *filename* to a new numbered backup in the same directory.

    Parameters
    ----------
    filename : str
        Full path to file to make backup of.
    """
    backup_path = _backup_name(filename, _count_backups(filename))
    shutil.copy(filename, backup_path)
| mosdef-hub/msibi | msibi/utils/general.py | Python | mit | 3,629 | [
"MDTraj"
] | 6708fe37dca549133a661a16cd1f46c6c6db2d7ec3a250bc2a9424bd51222f12 |
#Brian Refsdal's parallel_map, from astropython.org
#Not sure what license this is released under, but until I know better:
#
#Copyright (c) 2010, Brian Refsdal
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
#1. Redistributions of source code must retain the above copyright
#notice, this list of conditions and the following disclaimer.
#
#2. Redistributions in binary form must reproduce the above copyright
#notice, this list of conditions and the following disclaimer in the
#documentation and/or other materials provided with the distribution.
#
#3. The name of the author may not be used to endorse or promote
#products derived from this software without specific prior written
#permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import platform
import numpy
# Feature-detect multiprocessing support. Fall back to serial behaviour
# (_multi=False, one CPU) when the module or cpu_count() is unavailable.
_multi = False
_ncpus = 1
try:
    # May raise ImportError on platforms without multiprocessing
    import multiprocessing
    _multi = True
    # May raise NotImplementedError
    _ncpus = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
    # Defect fixed: the original bare `except:` also swallowed
    # KeyboardInterrupt/SystemExit; only the two documented failure
    # modes are caught now, leaving the serial defaults in place.
    pass
__all__ = ('parallel_map',)
def worker(f, ii, chunk, out_q, err_q, lock):
    """
    Map ``f`` over one slice of the input iterable and report the result.

    :param f : callable applied to each element of ``chunk``
    :param ii : process ID, later used to order the result slices
    :param chunk: slice of the input iterable
    :param out_q: thread-safe queue receiving ``(ii, results)``
    :param err_q: thread-safe queue receiving any raised exception
    :param lock : thread-safe lock to protect a resource
                  ( useful in extending parallel_map() )
    """
    results = []
    for item in chunk:
        try:
            results.append(f(item))
        except Exception as exc:
            # Report the failure to the parent and abandon this slice.
            err_q.put(exc)
            return
    # Ship the completed slice together with its task ID.
    out_q.put((ii, results))
def run_tasks(procs, err_q, out_q, num):
    """
    Start the given worker processes, wait for them, and assemble the
    resultant array, re-raising any exception a worker reported.

    :param procs: list of Process objects
    :param out_q: thread-safe output queue
    :param err_q: thread-safe queue populated on exception
    :param num : number of result slices expected
    """
    def kill_running(process_list):
        # Terminate every process that has not yet finished.
        for p in process_list:
            if p.exitcode is None:
                p.terminate()
    try:
        for p in procs:
            p.start()
        for p in procs:
            p.join()
    except Exception as exc:
        # e.g. ctrl-C: stop all slave processes before propagating.
        try:
            kill_running(procs)
        finally:
            raise exc
    if not err_q.empty():
        # A worker reported a failure: stop everything and re-raise it.
        try:
            kill_running(procs)
        finally:
            raise err_q.get()
    # Workers finish in arbitrary order; each task ID doubles as the
    # index of its slice in the result array.
    slices = [None] * num
    while not out_q.empty():
        task_id, vals = out_q.get()
        slices[task_id] = vals
    # Flatten the extra dimension introduced by numpy.array_split.
    return list(numpy.concatenate(slices))
def parallel_map(function, sequence, numcores=None):
    """
    A parallelized version of the native Python map function that
    utilizes the Python multiprocessing module to divide and
    conquer sequence.

    parallel_map does not yet support multiple argument sequences.

    :param function: callable function that accepts argument from iterable
    :param sequence: iterable sequence
    :param numcores: number of cores to use (defaults to all detected cores)
    :returns: list of results of ``function``, in input order
    """
    if not callable(function):
        raise TypeError("input function '%s' is not callable" %
                        repr(function))
    if not numpy.iterable(sequence):
        raise TypeError("input '%s' is not iterable" %
                        repr(sequence))
    size = len(sequence)
    if not _multi or size == 1:
        # Defect fixed: map() is a lazy iterator on Python 3, so this path
        # returned a different type than the parallel paths below; wrap in
        # list() to match the Windows branch and run_tasks().
        return list(map(function, sequence))
    if numcores is None:
        numcores = _ncpus
    if platform.system() == 'Windows':  # JB: don't think this works on Win
        return list(map(function, sequence))
    # Use fork-based parallelism (because spawn fails with pickling issues, #457)
    ctx = multiprocessing.get_context('fork')
    # Returns a started SyncManager object which can be used for sharing
    # objects between processes. The returned manager object corresponds
    # to a spawned child process and has methods which will create shared
    # objects and return corresponding proxies.
    manager = ctx.Manager()
    # Create FIFO queue and lock shared objects and return proxies to them.
    # The manager handles a server process that manages shared objects that
    # each slave process has access to. Bottom line -- thread-safe.
    out_q = manager.Queue()
    err_q = manager.Queue()
    lock = manager.Lock()
    # Never spawn more processes than there are items to process.
    if size < numcores:
        numcores = size
    # group sequence into numcores-worth of chunks
    sequence = numpy.array_split(sequence, numcores)
    procs = [ctx.Process(target=worker,
                         args=(function, ii, chunk, out_q, err_q, lock))
             for ii, chunk in enumerate(sequence)]
    return run_tasks(procs, err_q, out_q, numcores)
if __name__ == "__main__":
    """
    Unit test of parallel_map()

    Create an arbitrary length list of references to a single
    matrix containing random floats and compute the eigenvals
    in serial and parallel. Compare the results and timings.
    """
    import time
    numtasks = 5
    #size = (1024,1024)
    size = (512,512)
    vals = numpy.random.rand(*size)
    f = numpy.linalg.eigvals
    iterable = [vals]*numtasks
    print('Running numpy.linalg.eigvals %iX on matrix size [%i,%i]' %
          (numtasks,size[0],size[1]))
    tt = time.time()
    presult = parallel_map(f, iterable)
    print('parallel map in %g secs' % (time.time()-tt))
    tt = time.time()
    # Defect fixed: on Python 3 map() is lazy, so the timing below measured
    # nothing and numpy.asarray() wrapped the iterator in a 0-d object array,
    # breaking the comparison. Materialize the serial results eagerly.
    result = list(map(f, iterable))
    print('serial map in %g secs' % (time.time()-tt))
    assert (numpy.asarray(result) == numpy.asarray(presult)).all()
| jobovy/galpy | galpy/util/multi.py | Python | bsd-3-clause | 6,614 | [
"Brian"
] | 3ca2b89abaf6f50798b57f968fad082e6a3ca11d1f45bbd95e18b0390e34f44e |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module provides classes for analyzing phase diagrams.
"""
from six.moves import zip
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "May 16, 2012"
import numpy as np
import itertools
import collections
from monty.functools import lru_cache
from pymatgen.core.composition import Composition
from pymatgen.phasediagram.maker import PhaseDiagram, \
GrandPotentialPhaseDiagram, get_facets
from pymatgen.analysis.reaction_calculator import Reaction
from pymatgen.util.coord_utils import Simplex
class PDAnalyzer(object):
    """
    A class for performing analyses on Phase Diagrams.
    The algorithm is based on the work in the following papers:
    1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
    First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
    doi:10.1021/cm702327g
    2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
    of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
    principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
    doi:10.1016/j.elecom.2010.01.010
    """
    # Tolerance below which amounts/energies are treated as zero throughout.
    numerical_tol = 1e-8
    def __init__(self, pd):
        """
        Initializes analyzer with a PhaseDiagram.
        Args:
            pd: Phase Diagram to analyze.
        """
        self._pd = pd
    def _make_comp_matrix(self, complist):
        """
        Helper function to generates a normalized composition matrix from a
        list of compositions.
        """
        # One row per composition, one column per element of the diagram.
        return np.array([[comp.get_atomic_fraction(el)
                          for el in self._pd.elements] for comp in complist])
    # NOTE(review): lru_cache on a bound method keys on (self, comp) and
    # keeps the analyzer instance alive for the cache's lifetime; maxsize=1
    # means only the most recently queried composition is remembered.
    @lru_cache(1)
    def _get_facet(self, comp):
        """
        Get any facet that a composition falls into. Cached so successive
        calls at same composition are fast.
        """
        if set(comp.elements).difference(self._pd.elements):
            raise ValueError('{} has elements not in the phase diagram {}'
                             ''.format(comp, self._pd.elements))
        # Simplex coordinates omit the first element; its fraction is
        # implied by the remaining ones.
        c = [comp.get_atomic_fraction(e) for e in self._pd.elements[1:]]
        for f, s in zip(self._pd.facets, self._pd.simplices):
            if Simplex(s).in_simplex(c, PDAnalyzer.numerical_tol / 10):
                return f
        raise RuntimeError("No facet found for comp = {}".format(comp))
    def get_decomposition(self, comp):
        """
        Provides the decomposition at a particular composition.
        Args:
            comp: A composition
        Returns:
            Decomposition as a dict of {Entry: amount}
        """
        facet = self._get_facet(comp)
        comp_list = [self._pd.qhull_entries[i].composition for i in facet]
        m = self._make_comp_matrix(comp_list)
        compm = self._make_comp_matrix([comp])
        # Solve m.T x = comp for the mixing fractions of the facet phases.
        decomp_amts = np.linalg.solve(m.T, compm.T)
        # Phases with a fraction below numerical_tol are dropped.
        return {self._pd.qhull_entries[f]: amt[0]
                for f, amt in zip(facet, decomp_amts)
                if abs(amt[0]) > PDAnalyzer.numerical_tol}
    def get_hull_energy(self, comp):
        """
        Args:
            comp (Composition): Input composition
        Returns:
            Energy of lowest energy equilibrium at desired composition. Not
            normalized by atoms, i.e. E(Li4O2) = 2 * E(Li2O)
        """
        # Decomposition-weighted mean of per-atom energies, scaled back up.
        e = 0
        for k, v in self.get_decomposition(comp).items():
            e += k.energy_per_atom * v
        return e * comp.num_atoms
    def get_decomp_and_e_above_hull(self, entry, allow_negative=False):
        """
        Provides the decomposition and energy above convex hull for an entry.
        Due to caching, can be much faster if entries with the same composition
        are processed together.
        Args:
            entry: A PDEntry like object
            allow_negative: Whether to allow negative e_above_hulls. Used to
                calculate equilibrium reaction energies. Defaults to False.
        Returns:
            (decomp, energy above convex hull) Stable entries should have
            energy above hull of 0. The decomposition is provided as a dict of
            {Entry: amount}.
        """
        # Stable entries lie on the hull by definition.
        if entry in self._pd.stable_entries:
            return {entry: 1}, 0
        facet = self._get_facet(entry.composition)
        comp_list = [self._pd.qhull_entries[i].composition for i in facet]
        m = self._make_comp_matrix(comp_list)
        compm = self._make_comp_matrix([entry.composition])
        # [:, 0] extracts the single solution column as a 1-D vector.
        decomp_amts = np.linalg.solve(m.T, compm.T)[:, 0]
        decomp = {self._pd.qhull_entries[facet[i]]: decomp_amts[i]
                  for i in range(len(decomp_amts))
                  if abs(decomp_amts[i]) > PDAnalyzer.numerical_tol}
        energies = [self._pd.qhull_entries[i].energy_per_atom for i in facet]
        # Hull energy at this composition is the decomposition-weighted
        # mean of the facet phase energies.
        ehull = entry.energy_per_atom - np.dot(decomp_amts, energies)
        if allow_negative or ehull >= -PDAnalyzer.numerical_tol:
            return decomp, ehull
        raise ValueError("No valid decomp found!")
    def get_e_above_hull(self, entry):
        """
        Provides the energy above convex hull for an entry
        Args:
            entry: A PDEntry like object
        Returns:
            Energy above convex hull of entry. Stable entries should have
            energy above hull of 0.
        """
        return self.get_decomp_and_e_above_hull(entry)[1]
    def get_equilibrium_reaction_energy(self, entry):
        """
        Provides the reaction energy of a stable entry from the neighboring
        equilibrium stable entries (also known as the inverse distance to
        hull).
        Args:
            entry: A PDEntry like object
        Returns:
            Equilibrium reaction energy of entry. Stable entries should have
            equilibrium reaction energy <= 0.
        """
        if entry not in self._pd.stable_entries:
            raise ValueError("Equilibrium reaction energy is available only "
                             "for stable entries.")
        if entry.is_element:
            return 0
        # Rebuild the diagram without this entry, then measure the (signed)
        # distance from the entry to the remaining hull.
        entries = [e for e in self._pd.stable_entries if e != entry]
        modpd = PhaseDiagram(entries, self._pd.elements)
        analyzer = PDAnalyzer(modpd)
        return analyzer.get_decomp_and_e_above_hull(entry,
                                                    allow_negative=True)[1]
    def get_facet_chempots(self, facet):
        """
        Calculates the chemical potentials for each element within a facet.
        Args:
            facet: Facet of the phase diagram.
        Returns:
            { element: chempot } for all elements in the phase diagram.
        """
        complist = [self._pd.qhull_entries[i].composition for i in facet]
        energylist = [self._pd.qhull_entries[i].energy_per_atom for i in facet]
        m = self._make_comp_matrix(complist)
        # Solve m . mu = energies for the per-element chemical potentials.
        chempots = np.linalg.solve(m, energylist)
        return dict(zip(self._pd.elements, chempots))
    def get_composition_chempots(self, comp):
        """Return the facet chemical potentials at a given composition."""
        facet = self._get_facet(comp)
        return self.get_facet_chempots(facet)
    def get_transition_chempots(self, element):
        """
        Get the critical chemical potentials for an element in the Phase
        Diagram.
        Args:
            element: An element. Has to be in the PD in the first place.
        Returns:
            A sorted sequence of critical chemical potentials, from less
            negative to more negative.
        """
        if element not in self._pd.elements:
            raise ValueError("get_transition_chempots can only be called with "
                             "elements in the phase diagram.")
        critical_chempots = []
        for facet in self._pd.facets:
            chempots = self.get_facet_chempots(facet)
            critical_chempots.append(chempots[element])
        # Deduplicate values that agree to within numerical_tol.
        clean_pots = []
        for c in sorted(critical_chempots):
            if len(clean_pots) == 0:
                clean_pots.append(c)
            else:
                if abs(c - clean_pots[-1]) > PDAnalyzer.numerical_tol:
                    clean_pots.append(c)
        clean_pots.reverse()
        return tuple(clean_pots)
    def get_element_profile(self, element, comp, comp_tol=1e-5):
        """
        Provides the element evolution data for a composition.
        For example, can be used to analyze Li conversion voltages by varying
        uLi and looking at the phases formed. Also can be used to analyze O2
        evolution by varying uO2.
        Args:
            element: An element. Must be in the phase diagram.
            comp: A Composition
            comp_tol: The tolerance to use when calculating decompositions.
                Phases with amounts less than this tolerance are excluded.
                Defaults to 1e-5.
        Returns:
            Evolution data as a list of dictionaries of the following format:
            [ {'chempot': -10.487582010000001, 'evolution': -2.0,
            'reaction': Reaction Object], ...]
        """
        if element not in self._pd.elements:
            raise ValueError("get_transition_chempots can only be called with"
                             " elements in the phase diagram.")
        chempots = self.get_transition_chempots(element)
        stable_entries = self._pd.stable_entries
        # Composition with the open element removed.
        gccomp = Composition({el: amt for el, amt in comp.items()
                              if el != element})
        elref = self._pd.el_refs[element]
        elcomp = Composition(element.symbol)
        prev_decomp = []
        evolution = []
        def are_same_decomp(decomp1, decomp2):
            # True when every composition of decomp2 appears in decomp1.
            for comp in decomp2:
                if comp not in decomp1:
                    return False
            return True
        for c in chempots:
            # Evaluate just below each transition chempot (c - 1e-5),
            # presumably to select the stable phase field on the lower
            # side of the transition -- TODO confirm.
            gcpd = GrandPotentialPhaseDiagram(
                stable_entries, {element: c - 1e-5}, self._pd.elements
            )
            analyzer = PDAnalyzer(gcpd)
            gcdecomp = analyzer.get_decomposition(gccomp)
            decomp = [gcentry.original_entry.composition
                      for gcentry, amt in gcdecomp.items()
                      if amt > comp_tol]
            decomp_entries = [gcentry.original_entry
                              for gcentry, amt in gcdecomp.items()
                              if amt > comp_tol]
            # Only record a data point when the phase assemblage changes.
            if not are_same_decomp(prev_decomp, decomp):
                if elcomp not in decomp:
                    decomp.insert(0, elcomp)
                rxn = Reaction([comp], decomp)
                rxn.normalize_to(comp)
                prev_decomp = decomp
                # Evolution amount = negative coefficient of the pure
                # element in the normalized reaction.
                amt = -rxn.coeffs[rxn.all_comp.index(elcomp)]
                evolution.append({'chempot': c,
                                  'evolution': amt,
                                  'element_reference': elref,
                                  'reaction': rxn, 'entries': decomp_entries})
        return evolution
    def get_chempot_range_map(self, elements, referenced=True, joggle=True):
        """
        Returns a chemical potential range map for each stable entry.
        Args:
            elements: Sequence of elements to be considered as independent
                variables. E.g., if you want to show the stability ranges
                of all Li-Co-O phases wrt to uLi and uO, you will supply
                [Element("Li"), Element("O")]
            referenced: If True, gives the results with a reference being the
                energy of the elemental phase. If False, gives absolute values.
            joggle (boolean): Whether to joggle the input to avoid precision
                errors.
        Returns:
            Returns a dict of the form {entry: [simplices]}. The list of
            simplices are the sides of the N-1 dim polytope bounding the
            allowable chemical potential range of each entry.
        """
        all_chempots = []
        pd = self._pd
        facets = pd.facets
        # One chempot vector per facet of the phase diagram.
        for facet in facets:
            chempots = self.get_facet_chempots(facet)
            all_chempots.append([chempots[el] for el in pd.elements])
        inds = [pd.elements.index(el) for el in elements]
        el_energies = {el: 0.0 for el in elements}
        if referenced:
            # Reference each chempot to the elemental phase energy.
            el_energies = {el: pd.el_refs[el].energy_per_atom
                           for el in elements}
        chempot_ranges = collections.defaultdict(list)
        # Default: a single "facet" spanning all chempot vectors; only run
        # the convex hull when there are more vectors than elements.
        vertices = [list(range(len(self._pd.elements)))]
        if len(all_chempots) > len(self._pd.elements):
            vertices = get_facets(all_chempots, joggle=joggle)
        for ufacet in vertices:
            for combi in itertools.combinations(ufacet, 2):
                data1 = facets[combi[0]]
                data2 = facets[combi[1]]
                # Entries shared by both phase-diagram facets bound the
                # chempot edge between the two chempot vertices.
                common_ent_ind = set(data1).intersection(set(data2))
                if len(common_ent_ind) == len(elements):
                    common_entries = [pd.qhull_entries[i]
                                      for i in common_ent_ind]
                    data = np.array([[all_chempots[i][j]
                                      - el_energies[pd.elements[j]]
                                      for j in inds] for i in combi])
                    sim = Simplex(data)
                    for entry in common_entries:
                        chempot_ranges[entry].append(sim)
        return chempot_ranges
    def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=1e-2):
        """
        returns a set of chemical potentials corresponding to the vertices of the simplex
        in the chemical potential phase diagram.
        The simplex is built using all elements in the target_composition except dep_elt.
        The chemical potential of dep_elt is computed from the target composition energy.
        This method is useful to get the limiting conditions for
        defects computations for instance.
        Args:
            target_comp: A Composition object
            dep_elt: the element for which the chemical potential is computed from the energy of
            the stable phase at the target composition
            tol_en: a tolerance on the energy to set
        Returns:
            [{Element:mu}]: An array of conditions on simplex vertices for
            which each element has a chemical potential set to a given
            value. "absolute" values (i.e., not referenced to element energies)
        """
        muref = np.array([self._pd.el_refs[e].energy_per_atom
                          for e in self._pd.elements if e != dep_elt])
        chempot_ranges = self.get_chempot_range_map(
            [e for e in self._pd.elements if e != dep_elt])
        # Pad the target composition with zero amounts of any missing element
        # so indexing by every diagram element is well-defined.
        for e in self._pd.elements:
            if not e in target_comp.elements:
                target_comp = target_comp + Composition({e: 0.0})
        coeff = [-target_comp[e] for e in self._pd.elements if e != dep_elt]
        # NOTE(review): the name `e` is reused by the nested comprehensions
        # below; under Python 2 (this module imports six) list-comprehension
        # variables leak and clobber the entry bound by this loop. Harmless
        # here since `e` is not read after the clobbering, but fragile.
        # NOTE(review): if no entry matches target_comp, `all_coords` is
        # never assigned and the final return raises NameError -- verify
        # callers guarantee the composition is on the diagram.
        for e in chempot_ranges.keys():
            if e.composition.reduced_composition == \
                    target_comp.reduced_composition:
                multiplicator = e.composition[dep_elt] / target_comp[dep_elt]
                ef = e.energy / multiplicator
                all_coords = []
                for s in chempot_ranges[e]:
                    for v in s._coords:
                        elts = [e for e in self._pd.elements if e != dep_elt]
                        res = {}
                        for i in range(len(elts)):
                            res[elts[i]] = v[i] + muref[i]
                        # mu of the dependent element is fixed by the phase
                        # energy and the other chempots.
                        res[dep_elt]=(np.dot(v+muref, coeff)+ef)/target_comp[dep_elt]
                        already_in = False
                        # Deduplicate vertices equal to within tol_en.
                        for di in all_coords:
                            dict_equals = True
                            for k in di:
                                if abs(di[k]-res[k]) > tol_en:
                                    dict_equals = False
                                    break
                            if dict_equals:
                                already_in = True
                                break
                        if not already_in:
                            all_coords.append(res)
        return all_coords
    def get_chempot_range_stability_phase(self, target_comp, open_elt):
        """
        returns a set of chemical potentials correspoding to the max and min
        chemical potential of the open element for a given composition. It is
        quite common to have for instance a ternary oxide (e.g., ABO3) for
        which you want to know what are the A and B chemical potential leading
        to the highest and lowest oxygen chemical potential (reducing and
        oxidizing conditions). This is useful for defect computations.
        Args:
            target_comp: A Composition object
            open_elt: Element that you want to constrain to be max or min
        Returns:
            {Element:(mu_min,mu_max)}: Chemical potentials are given in
            "absolute" values (i.e., not referenced to 0)
        """
        muref = np.array([self._pd.el_refs[e].energy_per_atom
                          for e in self._pd.elements if e != open_elt])
        chempot_ranges = self.get_chempot_range_map(
            [e for e in self._pd.elements if e != open_elt])
        # Pad the target composition with zero amounts of missing elements.
        for e in self._pd.elements:
            if not e in target_comp.elements:
                target_comp = target_comp + Composition({e: 0.0})
        coeff = [-target_comp[e] for e in self._pd.elements if e != open_elt]
        max_open = -float('inf')
        min_open = float('inf')
        max_mus = None
        min_mus = None
        # Scan all simplex vertices of the matching entry, tracking the
        # vertices that extremize the open element's chemical potential.
        for e in chempot_ranges.keys():
            if e.composition.reduced_composition == \
                    target_comp.reduced_composition:
                multiplicator = e.composition[open_elt] / target_comp[open_elt]
                ef = e.energy / multiplicator
                all_coords = []
                for s in chempot_ranges[e]:
                    for v in s._coords:
                        all_coords.append(v)
                        if (np.dot(v + muref, coeff) + ef) / target_comp[
                            open_elt] > max_open:
                            max_open = (np.dot(v + muref, coeff) + ef) / \
                                       target_comp[open_elt]
                            max_mus = v
                        if (np.dot(v + muref, coeff) + ef) / target_comp[
                            open_elt] < min_open:
                            min_open = (np.dot(v + muref, coeff) + ef) / \
                                       target_comp[open_elt]
                            min_mus = v
        elts = [e for e in self._pd.elements if e != open_elt]
        res = {}
        for i in range(len(elts)):
            res[elts[i]] = (min_mus[i] + muref[i], max_mus[i] + muref[i])
        res[open_elt] = (min_open, max_open)
        return res
| aykol/pymatgen | pymatgen/phasediagram/analyzer.py | Python | mit | 19,145 | [
"pymatgen"
] | 6c244f9345575494969f5ebd9213983e44f02ef285a208f9d8a96e938d6213d6 |
import datetime
import difflib
from flask import redirect, render_template, render_template_string, Blueprint, flash
from flask import request, url_for, jsonify
from flask_user import current_user, login_required, roles_accepted
from flask_login.mixins import AnonymousUserMixin
from flask_mail import Mail, Message
from werkzeug.datastructures import CombinedMultiDict
import json, random
import cPickle as pickle
import os
import uuid
import datetime
from app.init_app import app, db
from app.models import (UserProfileForm, FriendForm, Graph, GraphRevision, User,
Friendship, GraphViewRevision, FriendshipInvite)
from app.images import process_profile_picture
from app.utils import (subjective_graph_nodes, subjective_graph_edges,
objective_graph_nodes, objective_graph_edges)
# Set up Flask-Mail on the application; used below for friend-invite and
# graph-update notification emails.
mail = Mail(app)
# The Home page is accessible to anyone
@app.route('/home')
@login_required
def home_page():
    """Render the logged-in home page with any pending action items."""
    action_items = []
    photo = current_user.photo_file_name
    # Nudge users who have not uploaded a real profile picture yet.
    if not photo or photo == 'default.png':
        action_items.append({
            'type': 'photo',
            'url': '/pages/profile',
            'message': 'Please upload your profile picture',
        })
    return render_template('pages/home_page.html', action_items=action_items)
@app.route('/landing')
def landing_page():
    """Render the public landing page (no login required)."""
    return render_template('pages/landing_page.html')
@app.route('/')
def index():
    """Send anonymous visitors to the landing page, logged-in users home."""
    if current_user.is_anonymous:
        return render_template('pages/landing_page.html')
    return redirect(url_for('home_page'))
@app.route('/graphs')
@login_required # Limits access to authenticated users
def graph_list_page():
    """List every graph the current user owns or helps with."""
    # Owned graphs first, then graphs the user assists on.
    graphs = list(current_user.graphs_owned) + list(current_user.graphs_helping)
    return render_template('pages/graph_list_page.html', graphs=graphs)
def get_graph_data(graph):
    """Unpack a graph's latest revision plus each participant's latest view.

    Returns a (nodes, edges, helpers, default_helper) tuple, where
    `helpers` is a list of dicts (id, name, photo, view_nodes, view_edges)
    for every owner and helper, and `default_helper` is the current user's
    entry, falling back to a graph owner's entry.
    """
    revision = graph.current_revision
    # Revisions store pickled node/edge lists (cPickle, Python 2 str).
    nodes = pickle.loads(str(revision.nodes))
    edges = pickle.loads(str(revision.edges))
    helpers = []
    default_helper = None
    owner_helper = None
    for u in graph.owners + graph.helpers:
        views = GraphViewRevision.query.filter(
            (GraphViewRevision.graph_id == graph.id)
            & (GraphViewRevision.author_id == u.id)).order_by(
            'timestamp').all()
        # construct helper dict to pass into JS
        h = dict(id=u.id,
                 name=" ".join([u.first_name, u.last_name]),
                 photo=os.path.join('/static/images/users/',
                                    u.photo_file_name))
        if len(views) > 0:
            # views is ordered by timestamp, so [-1] is the most recent.
            view = views[-1]
            h['view_nodes'] = pickle.loads(str(view.nodes))
            h['view_edges'] = pickle.loads(str(view.edges))
        else:
            # if no views from this helper, use empty lists
            h['view_nodes'] = []
            h['view_edges'] = []
        helpers.append(h)
        if u == current_user:
            default_helper = h
        if u in graph.owners:
            owner_helper = h
    # Fall back to an owner's view when the current user has none.
    if default_helper is None:
        default_helper = owner_helper
    assert default_helper is not None
    return nodes, edges, helpers, default_helper
@app.route('/graph/<id>')
@login_required # Limits access to authenticated users
def graph_page(id):
    """Render the interactive editor for graph `id`."""
    graph = Graph.query.get(id)
    # Only owners and helpers may open a graph.
    if current_user not in graph.owners and current_user not in graph.helpers:
        return redirect(url_for('graph_list_page'))
    nodes, edges, helpers, default_helper = get_graph_data(graph)
    context = {
        'save_id': id,
        'graph_name': graph.name,
        'nodes': json.dumps(nodes),
        'edges': json.dumps(edges),
        'helpers': json.dumps(helpers),
        'default_helper': json.dumps(default_helper),
    }
    return render_template('pages/graph_page.html', **context)
@app.route('/_graph/<id>')
@login_required # Limits access to authenticated users
def graph_json(id):
    """Return graph `id` as JSON for asynchronous page updates."""
    graph = Graph.query.get(id)
    # Same access rule as the HTML graph page.
    if current_user not in graph.owners and current_user not in graph.helpers:
        return redirect(url_for('graph_list_page'))
    nodes, edges, helpers, default_helper = get_graph_data(graph)
    payload = dict(nodes=nodes,
                   edges=edges,
                   helpers=helpers,
                   default_helper=default_helper)
    return jsonify(graph=payload)
@app.route('/graph_diff/<id>')
@login_required
def graph_diff(id):
    """Show an HTML table diff of revision `id` against its parent."""
    new_revision = GraphRevision.query.get(id)
    new_lines = new_revision.string().split('\n')
    if new_revision.previous_revision_id is None:
        # First revision: diff against an empty document.
        old_lines = ['']
    else:
        parent = GraphRevision.query.get(new_revision.previous_revision_id)
        old_lines = parent.string().split('\n')
    diff = difflib.HtmlDiff().make_table(old_lines, new_lines)
    return render_template('pages/graph_diff_page.html', diff=diff)
@app.route('/graph_history/<id>')
@login_required
def graph_history(id):
    """Render the revision history page for graph `id`."""
    return render_template('pages/graph_history_page.html',
                           graph=Graph.query.get(id))
@app.route('/newgraph')
@login_required # Limits access to authenticated users
def graph_create_page():
    """Render an empty graph editor for creating a new graph."""
    # Defect fixed: `print 'newgraph'` was Python-2-only syntax; the
    # parenthesized call emits the same debug line on both Python 2 and 3.
    print('newgraph')
    return render_template('pages/graph_page.html',
                           nodes=json.dumps([]),
                           edges=json.dumps([]))
@app.route('/_save_graph', methods=['POST'])
@login_required # Limits access to authenticated users
def save_graph():
    """Persist a new revision (and the author's view) of a graph, creating
    the graph if `save_id` is unknown, then email all other participants."""
    data = json.loads(request.data)
    save_id = data['save_id']
    save_name = data['save_name']
    if not save_name:
        save_name = "NO NAME"
    nodes = data['nodes']
    edges = data['edges']
    # NOTE(review): leftover Python-2 debug prints to stdout.
    print nodes
    print edges
    graph = Graph.query.get(save_id)
    if graph is None:
        # Unknown id: create a brand-new graph owned by the current user.
        graph = Graph()
        graph.owners = [current_user]
        db.session.add(graph)
    graph.name = save_name
    # The author's personal "view" of the graph (subjective layout).
    view = GraphViewRevision()
    view.nodes = pickle.dumps(subjective_graph_nodes(nodes))
    view.edges = pickle.dumps(subjective_graph_edges(edges))
    view.author = current_user
    view.timestamp = datetime.datetime.now()
    # The shared, objective revision linked to its predecessor.
    revision = GraphRevision()
    revision.previous_revision_id = graph.current_revision_id
    revision.author = current_user
    revision.timestamp = datetime.datetime.now()
    revision.nodes = pickle.dumps(objective_graph_nodes(nodes))
    revision.edges = pickle.dumps(objective_graph_edges(edges))
    graph.views.append(view)
    graph.revisions.append(revision)
    # Save graph
    # Two-phase commit: the first commit assigns revision.id, which is
    # then recorded as the graph's current revision and committed again.
    db.session.commit()
    graph.current_revision_id = revision.id
    db.session.commit()
    # Send notification of graph update to all owners & helpers
    # (except for current user)
    updater_name = current_user.first_name + " " + current_user.last_name
    graph_diff_url = request.host + url_for('graph_diff',
                                            id=graph.current_revision_id)
    #print "graph.owners: "
    for u in graph.owners:
        if u.id != current_user.id:
            msg_to_owner = Message(
                "%s updated graph \"%s\"" % (updater_name, graph.name),
                recipients=[u.email])
            msg_to_owner.body = render_template('emails/owner_update.html',
                user=u, updater_name=updater_name,
                graph=graph,
                graph_diff_url=graph_diff_url)
            mail.send(msg_to_owner)
    #print "graph.helpers: "
    for u in graph.helpers:
        if u.id != current_user.id:
            #print u.first_name + " " + u.last_name
            msg_to_helper = Message(
                "%s updated graph \"%s\"" % (updater_name, graph.name),
                recipients=[u.email])
            msg_to_helper.body = render_template('emails/helper_update.html',
                user=u, updater_name=updater_name,
                graph=graph,
                graph_diff_url=graph_diff_url)
            mail.send(msg_to_helper)
    return jsonify(result="success")
@app.route('/_share_graph', methods=['POST'])
@login_required # Limits access to authenticated users
def share_graph():
    """Add a user as a helper on a graph (AJAX endpoint)."""
    payload = json.loads(request.data)
    graph = Graph.query.get(payload['graph_id'])
    helper = User.query.get(payload['user_id'])
    graph.helpers.append(helper)
    db.session.commit()
    return jsonify(result="success")
# The Admin page is accessible to users with the 'admin' role
@app.route('/admin')
@roles_accepted('admin') # Limits access to users with the 'admin' role
def admin_page():
    """Render the admin dashboard (requires the 'admin' role)."""
    return render_template('pages/admin_page.html')
# The user page shows the profile of any registered user to logged-in users.
@app.route('/user/<id>')
@login_required
def user_page(id):
    """Render the profile page for user `id`."""
    return render_template('pages/user_page.html', user=User.query.get(id))
@app.route('/friends', methods=['GET', 'POST'])
@login_required
def friends_page():
    """Show the user's friendships plus pending incoming invites."""
    # Pending invites may target either the user's ID or their email
    # address (invites sent before the account existed use the email).
    # `== None` is intentional: SQLAlchemy translates it to IS NULL.
    pending = FriendshipInvite.query.filter(
        (FriendshipInvite.confirmed_at == None) &
        ((FriendshipInvite.friendee_id == current_user.id) |
         (FriendshipInvite.friendee_email == current_user.email))).all()
    # Keep only the first invite from each inviter.
    seen_inviters = {}
    unique_invites = []
    for invite in pending:
        if invite.friender_id not in seen_inviters:
            seen_inviters[invite.friender_id] = 1
            unique_invites.append(invite)
    # TODO: if not using Form method, can remove below and FriendForm in models.py
    form = FriendForm(request.form)
    return render_template('pages/friends_page.html',
                           friendships=current_user.friendships,
                           form=form,
                           incoming_invites=unique_invites)
@app.route('/_invite_friend', methods=['POST'])
@login_required
def invite_friend():
    """Record a friendship invite and email the invitee.

    If the invitee already has an account the invite is keyed by user id;
    otherwise it is keyed by email and the mail asks them to register first.
    """
    data = json.loads(request.data)
    to_email = data['email']
    inviter_name = current_user.first_name + " " + current_user.last_name
    # NOTE(review): request.host has no scheme, so these URLs are emailed
    # without "http(s)://" -- confirm that is intended.
    confirm_friend_url = request.host + url_for("friends_page")
    register_url = request.host + url_for("user.register")
    new_invite = FriendshipInvite()
    new_invite.friender_id = current_user.id
    new_invite.invited_at = datetime.datetime.utcnow()
    to_users = list(User.query.filter(User.email==to_email).all())
    if len(to_users) == 1:
        # invite recipient already has an account
        new_invite.friendee_id = to_users[0].id
        db.session.add(new_invite)
        db.session.commit()
        msg = Message("Friend Request from " + inviter_name, recipients=[to_email])
        msg.body = inviter_name + " has invited you to be friends on Nash! \n\nPlease visit " + confirm_friend_url + " to confirm the friend request. \n\nThanks,\n- Nash"
        mail.send(msg)
    else:
        # invite recipient does NOT already have an account, will need to join Nash
        new_invite.friendee_email = to_email
        db.session.add(new_invite)
        db.session.commit()
        msg = Message("Invite from " + inviter_name + " to Nash", recipients=[to_email])
        msg.body = inviter_name + " has invited you to be friends on Nash, a tool for reality testing. \n\nPlease visit " + register_url + " to sign up for Nash! \n\nYou can then visit " + confirm_friend_url + " confirm the friend request. \n\nThanks,\n- Nash"
        mail.send(msg)
    return jsonify(result="success")
@app.route('/_confirm_friend', methods=['POST'])
@login_required
def confirm_friend():
    """Accept a pending invite: mark it confirmed and create the mutual
    Friendship rows between the current user and the inviter."""
    data = json.loads(request.data)
    friend_id = data['friend_id']
    friend = User.query.get(friend_id)
    # update confirmed_at for all invites with friender_id=friend_id
    # and target friend as current user (matched by id or email)
    FriendshipInvite.query.filter( \
        (FriendshipInvite.friender_id==friend_id) & \
        (FriendshipInvite.confirmed_at==None) & \
        ((FriendshipInvite.friendee_id==current_user.id) | \
        (FriendshipInvite.friendee_email==current_user.email))).\
        update({FriendshipInvite.confirmed_at: datetime.datetime.utcnow()})
    # add to actual friends list for both current_user & friend
    # NOTE(review): these new Friendship objects are never passed to
    # db.session.add(); they only reach the session if assigning the
    # relationship cascades from the persistent User objects -- verify.
    friendship = Friendship()
    friendship.friender = current_user
    friendship.friendee = friend
    friendship_mutual = Friendship()
    friendship_mutual.friender = friend
    friendship_mutual.friendee = current_user
    # save all changes
    db.session.commit()
    return jsonify(result="success")
@app.route('/pages/profile', methods=['GET', 'POST'])
@login_required
def user_profile_page():
    """Display and process the current user's profile form, including an
    optional profile-photo upload."""
    # Initialize form from both the uploaded files and the POSTed fields.
    form = UserProfileForm(CombinedMultiDict((request.files, request.form)), current_user)
    # Process valid POST
    if request.method == 'POST' and form.validate():
        # Save photo (only if a file was actually uploaded)
        if form.photo.data.filename != "" :
            f = form.photo.data
            img = process_profile_picture(f.stream)
            orig_filename, file_extension = os.path.splitext(f.filename)
            # Random UUID filename avoids collisions between users'
            # uploads while keeping the original extension.
            filename = str(uuid.uuid4()) + file_extension
            img.save(os.path.join(
                app.instance_path, 'photos', filename
            ))
            current_user.photo_file_name = filename
        # Copy form fields to user_profile fields
        form.populate_obj(current_user)
        # Save user_profile
        db.session.commit()
        flash('Profile updated successfully.', 'success')
    # Process GET or invalid POST
    return render_template('pages/user_profile_page.html',
                           form=form)
| piyushhari/nash | app/views.py | Python | bsd-2-clause | 14,015 | [
"VisIt"
] | fa1fcd5ba24bc752aab86fc19cad52cf7923791519f648dae44669b0ae95698d |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.