| text (string, length 12–1.05M) | repo_name (string, length 5–86) | path (string, length 4–191) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12–1.05M) | keyword (list, length 1–23) | text_hash (string, length 64) |
|---|---|---|---|---|---|---|---|
import sys
sys.path.append( '../../_build/moose' )
import moose
print( 'Using moose from %s' % moose.__file__ )
f = moose.Function( '/f' )
f.expr = 'rand(1)'
moose.reinit()
for i in range( 10 ):
moose.start( 1 )
print( f.value )
| rahulgayatri23/moose-core | tests/issues/issue_muparser_rand.py | Python | gpl-3.0 | 235 | ["MOOSE"] | ef044dc24af57afcd3bd3a389f3ee3a925f14694cbd8f742abbca67d53aec4e4 |
# -*- coding: utf-8 -*-
"""
[Python 2.7 (Mayavi is not yet compatible with Python 3+)]
Created on Tue Feb 10 18:27:17 2015
@author: Ryan Stauffer
https://github.com/ryanpstauffer/market-vis
Market Visualization Prototype
Visualization and Interactive module
"""
import numpy as np
import moviepy.editor as mpy
def visualizePrices(prices):
'''Creates a mayavi visualization of a pd DataFrame containing stock prices
Inputs:
prices => a pd DataFrame, w/ index: dates; columns: company names
'''
#Imports mlab here to delay starting of mayavi engine until necessary
from mayavi import mlab
#Because of current mayavi requirements, replaces dates and company names with integers
x_length, y_length = prices.shape
xTime = np.array([list(xrange(x_length)),] * y_length).transpose()
yCompanies = np.array([list(xrange(y_length)),] * x_length)
#Sort indexed prices by total return on last date
lastDatePrices = prices.iloc[-1]
lastDatePrices.sort_values(inplace=True)
sortOrder = lastDatePrices.index
zPrices = prices[sortOrder]
#Create mayavi2 object
fig = mlab.figure(bgcolor=(.4,.4,.4))
vis = mlab.surf(xTime, yCompanies, zPrices)
mlab.outline(vis)
mlab.orientation_axes(vis)
#mlab.title('S&P 500 Market Data Visualization', size = .25)
mlab.axes(vis, nb_labels=0, xlabel = 'Time', ylabel = 'Company', zlabel = 'Price')
mlab.show()
def make_frame(t):
mlab.view(elevation=70, azimuth=360*t/4.0, distance=1400) #Camera angle
return mlab.screenshot(antialiased=True)
def animateGIF(filename, prices):
'''Creates a mayavi visualization of a pd DataFrame containing stock prices
Then uses MoviePy to animate and save as a gif
Inputs:
prices => a pd DataFrame, w/ index: dates; columns: company names
'''
#Imports mlab here to delay starting of mayavi engine until necessary
from mayavi import mlab
#Because of current mayavi requirements, replaces dates and company names with integers
x_length, y_length = prices.shape
xTime = np.array([list(xrange(x_length)),] * y_length).transpose()
yCompanies = np.array([list(xrange(y_length)),] * x_length)
#Sort indexed prices by total return on last date
lastDatePrices = prices.iloc[-1]
lastDatePrices.sort_values(inplace=True)
sortOrder = lastDatePrices.index
zPrices = prices[sortOrder]
#Create mayavi2 object
fig = mlab.figure(bgcolor=(.4,.4,.4))
vis = mlab.surf(xTime, yCompanies, zPrices)
mlab.outline(vis)
mlab.orientation_axes(vis)
mlab.axes(vis, nb_labels=0, xlabel = 'Time', ylabel = 'Company', zlabel = 'Price')
animation = mpy.VideoClip(make_frame, duration = 4).resize(1.0)
animation.write_gif(filename, fps=20)
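# Hedged usage sketch (not part of the original module): builds a small
# synthetic prices DataFrame of the documented shape (index: dates, columns:
# company names) and opens the interactive view. Ticker names are made up;
# animateGIF(filename, prices) is the GIF-writing counterpart.
if __name__ == '__main__':
    import pandas as pd
    dates = pd.date_range('2015-01-02', periods=60)
    demo_prices = pd.DataFrame(100 + np.random.randn(60, 4).cumsum(axis=0),
                               index=dates, columns=['AAA', 'BBB', 'CCC', 'DDD'])
    visualizePrices(demo_prices)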
| ryanpstauffer/market-vis | marketvis/visualization.py | Python | mit | 2,880 | ["Mayavi"] | fabddea25f885b7e0d92c6c2fb8ba234eba93aa6823d3f9931af2753c5e3b767 |
from __future__ import division
import numpy as np
import pylab as pl
from mpl_toolkits.axes_grid1 import make_axes_locatable
import fastfit
# Set for our camera
sx, sy, pixelsize = 640, 480, 5.6
def sizetext(sx, sy):
"""
Check if the difference between the two axes are similar or different
Returns displayable text
"""
if abs(sx - sy)/(sx+sy) < 0.05:
csign = '~'
elif (sx > sy):
csign = '>'
else:
csign = '<'
ctext = "wx : wy\n%.1f %s %.1f" %(sx, csign, sy)
return ctext
def limits(centre, out, valid):
""" Calculate matrix borders with limits """
bottom = max(valid[0], centre-out)
top = min(valid[1], centre+out)
return bottom, top
def gauss(x, x0, sx, scale):
""" Simple gaussian """
return np.exp(-(x-x0)**2 / (2 * sx**2)) * scale
def preparedata(data):
""" Prepare data for processing by cutting down on the area for D4s
Input:
------
data: original reading values in numpy array from
Output:
------
prepared: data in the cropped area
(xbottom, ybottom): value of the starting corner in two dimension
"""
dh, dw = data.shape
border = np.array([])
border = np.append(border, np.ravel(data[:-1, 0]))
border = np.append(border, np.ravel(data[0, 1:]))
border = np.append(border, np.ravel(data[1:, -1]))
border = np.append(border, np.ravel(data[-1, :-1]))
borderavg, bordervar = np.mean(border), np.var(border)
slimdata = np.copy(data) - borderavg
maxy, maxx = np.unravel_index(np.argmax(slimdata), slimdata.shape)
# Find the number of points in the x/y direction that are above the background noise
xl = slimdata[:, maxx]
xdim = sum(sum([xl > 10*bordervar]))
yl = slimdata[maxy, :]
ydim = sum(sum([yl > 10*bordervar]))
dim = 2 * max(xdim, ydim)
if dim < 5:
# In this case, most likely we don't have a peak, just noise, since
# very few points are over the noiselevel
return data, (0, 0)
xbottom, xtop = limits(maxx, dim, (0, dw))
ybottom, ytop = limits(maxy, dim, (0, dh))
testdata = slimdata[ybottom:ytop, xbottom:xtop]
xx, yy, dx, dy, angle = fastfit.d4s(testdata)
xc = int(xbottom+xx)
yc = int(ybottom+yy)
limr = 0.69
limx = int(dx * limr)
limy = int(dy * limr)
xbottom, xtop = limits(xc, limx, (0, dw))
ybottom, ytop = limits(yc, limy, (0, dh))
prepared = slimdata[ybottom:ytop, xbottom:xtop]
return prepared, (xbottom, ybottom)
def analyze(data):
""" Do all the analysis that's needed to create the interface """
prepared, centre = preparedata(data)
xx, yy, dx, dy, angle = fastfit.d4s(prepared)
xx += centre[0]
yy += centre[1]
xr, yr = fastfit.getellipse(xx, yy, dx, dy, angle)
# fix axes calculation so no more -1 is needed
angle *= -1
adeg = "%.1f deg" %(angle / np.pi * 180)
xxx = [xx - dx/2*np.cos(angle), xx + dx/2*np.cos(angle)]
xxy = [yy + dx/2*np.sin(angle), yy - dx/2*np.sin(angle)]
yyx = [xx + dy/2*np.sin(angle), xx - dy/2*np.sin(angle)]
yyy = [yy + dy/2*np.cos(angle), yy - dy/2*np.cos(angle)]
xwidth = (dx*np.cos(angle)**2 + dy*np.sin(angle)**2)/4.0
ywidth = (dx*np.sin(angle)**2 + dy*np.cos(angle)**2)/4.0
try:
xc = int(np.round(xx))
yc = int(np.round(yy))
except ValueError:
xc = 320
yc = 240
sy, sx = data.shape
xcut = data[yc, :]
ycut = data[:, xc]
xline = range(0, sx)
yline = range(0, sy)
xcutg = gauss(xline, xx, xwidth, max(xcut))
ycutg = gauss(yline, yy, ywidth, max(ycut))
return (xx, yy, dx, dy, angle, xr, yr, adeg, xxx, xxy, yyx, yyy, xwidth, ywidth, xc, yc, xcut, ycut, xcutg, ycutg)
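# Hedged sketch (not in the original module): how the analyze() output maps to
# the physical beam waists reported by the interface below (see the header
# text built in createiface). 'frame' stands for a hypothetical 2D camera image.
def _example_waists(frame):
    results = analyze(frame)
    xx, yy, dx, dy, angle = results[:5]
    wx = dx / 2 * pixelsize  # waist along the first principal axis [um]
    wy = dy / 2 * pixelsize  # waist along the second principal axis [um]
    return wx, wy, angle / np.pi * 180  # widths in um, angle in degrees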
def createiface(data):
(xx, yy, dx, dy, angle, xr, yr, adeg, xxx, xxy, yyx, yyy, xwidth, ywidth, xc, yc, xcut, ycut, xcutg, ycutg) = analyze(data)
sy, sx = data.shape
st = sizetext(dx/2*pixelsize, dy/2*pixelsize)
text = "Data range: %d - %d" %(np.min(data), np.max(data))
fs = 12.5
fig = pl.figure(num=1, figsize=(fs, fs))
axImg = pl.subplot(111)
img = axImg.imshow(data, vmin=0, vmax=256, cmap='Paired')
centre, = axImg.plot(xx, yy, '+', markersize=10)
ellipse, = axImg.plot(xr, yr, 'k-', linewidth=3)
ax1, ax2, = axImg.plot(xxx, xxy, 'k-', yyx, yyy, 'k-', linewidth=2)
divider = make_axes_locatable(axImg)
xline = range(0, sx)
yline = range(0, sy)
cutlinewidth=3
fitlinewidth=2
axCuty = divider.append_axes("right", size=1.4, pad=0.1, sharey=axImg)
yl, = axCuty.plot(ycut, yline, 'k-', linewidth=cutlinewidth)
yg, = axCuty.plot(ycutg, yline, 'r-', linewidth=fitlinewidth)
axCutx = divider.append_axes("bottom", size=1.4, pad=0.1, sharex=axImg)
xl, = axCutx.plot(xline, xcut, 'k-', linewidth=cutlinewidth)
xg, = axCutx.plot(xline, xcutg, 'r-', linewidth=fitlinewidth)
# Setting up limits
axImg.set_xlim([0, sx])
axImg.set_ylim([sy, 0])
axImg.set_xticks([])
axImg.set_yticks([])
axCuty.set_xlim([-10, 256])
axCuty.set_xticks([0, 128, 256])
axCutx.set_ylim([256, -10])
axCutx.set_yticks([0, 128, 256])
# Header text
htext = fig.text(0.5,
0.99,
'wx,wy = D4s/2*pixelsize [um] along principal axes\n = beam waists for Gaussian beam',
horizontalalignment='center',
verticalalignment='top',
fontsize=16,
family='monospace',
)
# Size text
sztext = fig.text(0.5,
0.81,
st,
horizontalalignment='center',
verticalalignment='baseline',
fontsize=67,
family='monospace',
)
# Angle text
atext = fig.text(0.5,
0.10,
adeg,
horizontalalignment='center',
verticalalignment='baseline',
fontsize=65,
family='monospace')
# Rangetext
uptext = fig.text(0.5,
0.05,
text,
horizontalalignment='center',
verticalalignment='baseline',
fontsize=25,
family='monospace',
)
ret = (fig, img, centre, ellipse, ax1, ax2, xl, xg, yl, yg, sztext, uptext, atext)
return ret
def updateiface(data, elements):
"""
Run only updates of data on the interface
data : input data
elements : output of createiface(data)
"""
(xx, yy, dx, dy, angle, xr, yr, adeg, xxx, xxy, yyx, yyy, xwidth, ywidth, xc, yc, xcut, ycut, xcutg, ycutg) = analyze(data)
(fig, img, centre, ellipse, ax1, ax2, xl, xg, yl, yg, sztext, uptext, atext) = elements
xline = range(0, sx)
yline = range(0, sy)
img.set_data(data)
centre.set_xdata(xx)
centre.set_ydata(yy)
ellipse.set_xdata(xr)
ellipse.set_ydata(yr)
ax1.set_xdata(xxx)
ax1.set_ydata(xxy)
ax2.set_xdata(yyx)
ax2.set_ydata(yyy)
xl.set_ydata(xcut)
xg.set_ydata(xcutg)
yl.set_xdata(ycut)
yg.set_xdata(ycutg)
st = sizetext(dx/2*pixelsize, dy/2*pixelsize)
text = "Data range: %d - %d" %(np.min(data), np.max(data))
uptext.set_text(text)
sztext.set_text(st)
atext.set_text(adeg)
if __name__ == "__main__":
filename = "test.txt"
data = np.loadtxt(filename)
elements = createiface(data)
updateiface(data, elements)
pl.show()
| UltracoldAtomsLab/labhardware | projects/beamprofile/interface.py | Python | mit | 7,631 | ["Gaussian"] | 4b568beb262a986dd8c24255f67746c3e56507f18ada7ffb37dcd67ae4c1c284 |
__RCSID__ = "$Id$"
'''
FileCatalogFactory class to create file catalog client objects according to the
configuration description
'''
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Resources.Catalog.FileCatalogProxyClient import FileCatalogProxyClient
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
class FileCatalogFactory:
def __init__( self ):
self.log = gLogger.getSubLogger( 'FileCatalogFactory' )
def createCatalog( self, catalogName, useProxy = False, catalogConfig = None, vo = None ):
""" Create a file catalog object from its name and CS description
"""
if useProxy:
catalog = FileCatalogProxyClient( catalogName )
return S_OK( catalog )
# get the CS description first
catConfig = catalogConfig
if not catConfig:
if not vo:
result = getVOfromProxyGroup()
if not result['OK']:
return result
vo = result['Value']
reHelper = Resources( vo = vo )
result = reHelper.getCatalogOptionsDict( catalogName )
if not result['OK']:
return result
catConfig = result['Value']
catalogType = catConfig.get('CatalogType', catalogName)
catalogURL = catConfig.get('CatalogURL', "DataManagement/" + catalogType)
self.log.verbose( 'Creating %s client' % catalogName )
objectLoader = ObjectLoader()
result = objectLoader.loadObject( 'Resources.Catalog.%sClient' % catalogType, catalogType+'Client' )
if not result['OK']:
gLogger.error( 'Failed to load catalog object: %s' % result['Message'] )
return result
catalogClass = result['Value']
try:
# FIXME: is it really needed? This is the factory, can't this be moved out?
if catalogType in [ 'LcgFileCatalogCombined', 'LcgFileCatalog' ]:
# The LFC special case
infoSys = catConfig.get( 'LcgGfalInfosys', '' )
host = catConfig.get( 'MasterHost', '' )
catalog = catalogClass( infoSys, host )
else:
if catalogURL:
catalog = catalogClass( url = catalogURL )
else:
catalog = catalogClass()
self.log.debug( 'Loaded module %sClient' % catalogType )
return S_OK( catalog )
except Exception, x:
errStr = "Failed to instantiate %s()" % ( catalogType )
gLogger.exception( errStr, lException = x )
return S_ERROR( errStr )
# Catalog module was not loaded
return S_ERROR( 'No suitable client found for %s' % catalogName )
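def _exampleCreateCatalog( catalogName = 'FileCatalog' ):
  """ Hedged usage sketch (not part of the original module): shows the
      S_OK/S_ERROR convention returned by createCatalog. The catalog name
      is illustrative and must be described in the CS for this to succeed.
  """
  result = FileCatalogFactory().createCatalog( catalogName )
  if not result['OK']:
    gLogger.error( result['Message'] )
    return result
  catalog = result['Value']
  return S_OK( catalog )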
| sposs/DIRAC | Resources/Catalog/FileCatalogFactory.py | Python | gpl-3.0 | 2,618 | ["DIRAC"] | f4a36bda6d27de028908c5e610bb8697fc389190875818f88d51c848c31f4c20 |
"""
.. module:: parser_mp
:synopsis: Definition of the command line options
.. moduleauthor:: Benjamin Audren <benjamin.audren@epfl.ch>
.. moduleauthor:: Francesco Montesano <franz.bergesund@gmail.com>
Defines the command line options and their help messages in
:func:`create_parser` and read the input command line in :func:`parse`, dealing
with different possible configurations.
The fancy short/long help formatting, as well as the automatic help creation
from docstrings is entirely due to Francesco Montesano.
"""
import os
import sys
import textwrap as tw
import re
import argparse as ap # Python module to handle command line arguments
import warnings
import io_mp
# -- custom Argument Parser that throws an io_mp.ConfigurationError
# -- for unified look within montepython
class MpArgumentParser(ap.ArgumentParser):
"""Extension of the default ArgumentParser"""
def error(self, message):
"""Override method to raise error
Parameters
----------
message: string
error message
"""
raise io_mp.ConfigurationError(message)
def safe_parse_args(self, args=None):
"""
Allows to set a default subparser
This trick is there to maintain the previous way of calling
MontePython.py
"""
args = self.set_default_subparser('run', args)
return self.parse_args(args)
def set_default_subparser(self, default, args=None):
"""
If no subparser option is found, add the default one
.. note::
This function relies on the fact that all calls to MontePython will
start with a `-`. If this came to change, this function should be
revisited
"""
if not args:
args = sys.argv[1:]
if args[0] not in ['-h', '--help', '--version', '-info']:
if args[0].find('-') != -1:
msg = "Defaulting to the 'run' command. Please update the"
msg += " call of MontePython. For more info, see the help"
msg += " string and/or the documentation "
warnings.warn(msg)
args.insert(0, default)
elif args[0] == '-info':
msg = "The info option has been turned into a command. "
msg += "Please substitute '-info' with 'info' when running "
msg += "MontePython"
warnings.warn(msg)
args[0] = 'info'
return args
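# Hedged illustration (not in the original module) of the defaulting behaviour
# implemented above, with made-up argument lists:
#   ['-N', '10', '-o', 'chains/example']  ->  ['run', '-N', '10', '-o', 'chains/example']
#   ['-info', 'chains/example']           ->  ['info', 'chains/example']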
# -- custom argparse types
# -- check that the argument is a positive integer
def positive_int(string):
"""
Check if the input is integer positive
Parameters
----------
string: string
string to parse
output: int
return the integer
"""
try:
value = int(string)
if value <= 0:
raise ValueError
return value
except ValueError:
raise ap.ArgumentTypeError(
"You asked for a non-positive number of steps. "
"I am not sure what to do, so I will exit. Sorry.")
# -- check that the argument is an existing file
def existing_file(fname):
"""
Check if the file exists. If not raise an error
Parameters
----------
fname: string
file name to parse
Returns
-------
fname : string
"""
if os.path.isfile(fname):
return fname
else:
msg = "The file '{}' does not exist".format(fname)
raise ap.ArgumentTypeError(msg)
def parse_docstring(docstring, key_symbol="<**>", description_symbol="<++>"):
"""
Extract from the docstring the keys and description, return it as a dict
Parameters
----------
docstring : str
key_symbol : str
identifies the key of an argument/option
description_symbol : str
identifies the description of an argument/option
output
------
helpdic : dict
help strings for the parser
"""
# remove new lines and multiple whitespaces
whitespaces = re.compile(r"\s+")
docstring = whitespaces.sub(" ", docstring)
# escape special characters
key_symbol = re.escape(key_symbol)
description_symbol = re.escape(description_symbol)
# define the regular expressions to match the key and the description
key_match = r'{0}-{{0,2}}(.+?){0}'
re_key = re.compile(key_match.format(key_symbol))
desc_match = r'({0}.+?{0}.+?{0})'
re_desc = re.compile(desc_match.format(description_symbol))
# get all and check that the keys and descriptions have the same lengths
keys = re_key.findall(docstring)
descriptions = re_desc.findall(docstring)
if len(keys) != len(descriptions):
msg = "The option keys and their descriptions have different lengths.\n"
msg += "Make sure that there are as many strings surrounded by '{0}'"
msg += " as there are surrounded by '{1}'"
raise ValueError(msg.format(key_symbol, description_symbol))
helpdict = dict(zip(keys, descriptions))
return helpdict
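# Hedged illustration (not in the original module) of the marker scheme that
# parse_docstring expects; the sample text is made up.
def _example_parse_docstring():
    sample = """
    <**>-N<**> : int
        <++>number of steps<++> full help text shown with --help<++>
    """
    # returns roughly {'N': '<++>number of steps<++> full help text shown with --help<++>'}
    return parse_docstring(sample)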
def custom_help(split_string="<++>"):
"""
Create a custom help action.
It expects *split_string* to appear in groups of three.
If the option string is '-h', then uses the short description
between the first two *split_string*.
If the option string is '--help', then uses all that is between
the first and the third *split_string*, stripping the first one.
Parameters
----------
split_string: str
string to use to select the help string and how to select them.
They must appear in groups of *3*
output
------
CustomHelp: class definition
"""
class CustomHelp(ap._HelpAction):
def __call__(self, parser, namespace, values, option_string=None):
# create the help string and store it into a string
from StringIO import StringIO
fstr = StringIO()
try:
parser.print_help(file=fstr)
help_str = fstr.getvalue()
finally:
fstr.close()
# create the regular expression to match the description
descmatch = r'{0}(.+?){0}(.+?){0}'
# escape possible dangerous characters
esplit_string = re.escape(split_string)
re_desc = re.compile(descmatch.format(esplit_string),
flags=re.DOTALL)
# select the case according to which option_string is selected
if option_string == '-h':
to_sub = r'\1'
elif option_string == '--help':
to_sub = r'\1\2'
print(re_desc.sub(to_sub, help_str))
parser.exit()
return CustomHelp
def add_subparser(sp, name, **kwargs):
"""
Add a parser to the subparser *sp* with *name*.
All the logic common to all subparsers should go here
Parameters
----------
sp: subparser instance
name: str
name of the subparser
kwargs: dict
keywords to pass to the subparser
output
------
sparser: Argparse instance
new subparser
"""
kwargs["add_help"] = False
kwargs['formatter_class'] = ap.ArgumentDefaultsHelpFormatter
sparser = sp.add_parser(name, **kwargs)
sparser.add_argument("-h", "--help", action=custom_help(),
help="print the short or long help")
return sparser
def get_dict_from_docstring(key_symbol="<**>", description_symbol="<++>"):
"""
Create the decorator
Parameters
----------
key_symbol : str
identifies the key of an argument/option
description_symbol: str
identifies the description of an argument/option
Returns
------
wrapper: function
"""
def wrapper(func):
"""
Decorator that wraps the function that implement the parser, parses the
`__doc__` and construct a dictionary with the help strings. The
dictionary is added as an attribute of `func` and can be accessed in
the function
Parameters
----------
func: function
function with the docs to be parsed
Returns
------
func: function
function with the dictionary added. *key_symbol* and
*description_symbol* strings are removed
"""
docstring = func.__doc__
helpdict = parse_docstring(
docstring, key_symbol=key_symbol,
description_symbol=description_symbol)
func.helpdict = helpdict
# remove markers
docstring = docstring.replace(key_symbol, '')
func.__doc__ = docstring.replace(description_symbol, '')
return func
return wrapper
def initialise_parser(**kwargs):
"""
Create the argument parser and returns it
Parameters
----------
kwargs: dictionary
keyword to pass to the parser
output
------
p: MpArgumentParser instance
parser with some keyword added
"""
kwargs['formatter_class'] = ap.ArgumentDefaultsHelpFormatter
p = MpArgumentParser(**kwargs)
# -- version
path_file = os.path.sep.join(
os.path.abspath(__file__).split(os.path.sep)[:-2])
with open(os.path.join(path_file, 'VERSION'), 'r') as version_file:
version = version_file.readline()
p.add_argument('--version', action='version', version=version)
p.add_argument("-v", "--verbose", action="store_true", help="Verbose mode")
return p
@get_dict_from_docstring()
def create_parser():
"""
Definition of the parser command line options
The main parser has so far two subparsers, corresponding to the two main
modes of operating the code, namely `run` and `info`. If you simply call
:code:`python montepython/MontePython.py -h`, you will find only this piece
of information. To go further, and find the command line options specific
to these two submodes, one should then do: :code:`python
montepython/MontePython.py run -h`, or :code:`info -h`.
All command line arguments are defined below, for each of the two
subparsers. This function creates the automatic help command.
Each flag outputs the following argument to a destination variable,
specified by the `dest` keyword argument in the source code. Please check
there to understand the variable names associated with each option.
Options
-------
**run**
<**>-N<**> : int
<++>number of steps in the chain<++> (**OBL**). Note that when
running on a cluster, your run might be stopped before reaching
this number.<++>
<**>-o<**> : str
<++>output folder<++> (**OBL**). For instance :code:`-o
chains/myexperiments/mymodel`. Note that in this example, the
folder :code:`chains/myexperiments` must already exist.<++>
<**>-p<**> : str
<++>input parameter file<++> (**OBL**). For example :code:`-p
input/exoticmodel.param`.<++>
<**>-c<**> : str
<++>input covariance matrix<++> (*OPT*). A covariance matrix is
created when analyzing previous runs.
Note that the list of parameters in the input covariance matrix and
in the run do not necessarily coincide.<++>
<**>-j<**> : str
<++>jumping method<++> (`global` (default), `sequential` or `fast`)
(*OPT*).
With the `global` method the code generates a new random direction
at each step, with the `sequential` one it cycles over the
eigenvectors of the proposal density (= input covariance matrix).
With the `global` method the acceptance rate is usually lower but the
points in the chains are less correlated. We recommend using the
sequential method to get started in difficult cases, when the
proposal density is very bad, in order to accumulate points and
generate a covariance matrix to be used later with the `default`
jumping method.
The `fast` method implements the Cholesky decomposition presented
in http://arxiv.org/abs/1304.4473 by Antony Lewis.<++>
<**>-m<**> : str
<++>sampling method<++>, by default 'MH' for Metropolis-Hastings,
can be set to 'NS' for Nested Sampling (using Multinest wrapper
PyMultiNest), 'CH' for Cosmo Hammer (using the Cosmo Hammer wrapper
to emcee algorithm), and finally 'IS' for importance sampling.
Note that when running with Importance sampling, you need to
specify a folder to start from.<++>
<**>--update<**> : int
<++>update frequency for Metropolis Hastings.<++>
If greater than zero, number of steps after which the proposal covariance
matrix is updated automatically (*OPT*).<++>
<**>-f<**> : float
<++>jumping factor<++> (>= 0, default to 2.4) (*OPT*).
The proposal density is given by the input covariance matrix (or a
diagonal matrix with elements given by the square of the input
sigma's) multiplied by the square of this factor. In other words, a
typical jump will have an amplitude given by sigma times this
factor.
The default is the famous factor 2.4, advertised by Dunkley
et al. to be an optimal trade-off between high acceptance rate and
high correlation of chain elements, at least for multivariate
gaussian posterior probabilities. It can be a good idea to reduce
this factor for very non-gaussian posteriors.
Using :code:`-f 0 -N 1` is a convenient way to get the likelihood
exactly at the starting point passed in input.<++>
<**>--conf<**> : str
<++>configuration file<++> (default to `default.conf`) (*OPT*).
This file contains the path to your cosmological module
directory.<++>
<**>--chain-number<**> : str
arbitrary <++>number of the output chain<++>, to overcome the
automatic one (*OPT*).
By default, the chains are named :code:`yyyy-mm-dd_N__i.txt` with
year, month and day being extracted, :code:`N` being the number of
steps, and :code:`i` an automatically updated index.
This means that running the code several times with the same
command will create different chains automatically.
This option is a way to enforce a particular number :code:`i`.
This can be useful when running on a cluster: for instance you may
ask your script to use the job number as :code:`i`.<++>
<**>-r<**> : str
<++>restart from last point in chain<++>, to avoid the burn-in
stage (*OPT*).
At the beginning of the run, the previous chain will be deleted,
and its content transfered to the beginning of the new chain.<++>
<**>-b<**> : str
<++>start a new chain from the bestfit file<++> computed with
analyze. (*OPT*)<++>
<**>--fisher<**> : None
<++>Calculates the inverse of the fisher matrix<++> to use as
proposal distribution<++>
<**>--silent<**> : None
<++>silence the standard output<++> (useful when running on
clusters)<++>
<**>--Der-target-folder<**> : str
<++>Add additional derived params to this folder<++>. It has to be
used in conjunction with `Der-param-list`, and the method set to
Der: :code:`-m Der`. (*OPT*)<++>
<**>--Der-param-list<**> : str
<++>Specify a number of derived parameters to be added<++>. A
complete example would be to add Omega_Lambda as a derived
parameter:
:code:`python montepython/MontePython.py run -o existing_folder
-m Der --Der-target-folder non_existing_folder --Der-param-list
Omega_Lambda`<++>
<**>--IS-starting-folder<**> : str
<++>Perform Importance Sampling from this folder or set of
chains<++> (*OPT*)<++>
<**>--stop-after-update<**> : bool
<++>When using update mode, stop the run after updating the covariance matrix.<++>
Useful if you want to change settings after the first guess (*OPT*) (flag)<++>
<**>--display-each-chi2<**> : bool
<++>Shows the effective chi2 from each likelihood and the total.<++>
Useful e.g. if you run at the bestfit point with -f 0 (flag)<++>
For Nested Sampling and Cosmo Hammer arguments, see
:mod:`nested_sampling` and :mod:`cosmo_hammer`.
**info**
Replaces the old **-info** command, which is deprecated but still
available.
<**>files<**> : string/list of strings
<++>you can specify either single files, or a complete folder<++>,
for example :code:`info chains/my-run/2012-10-26*`, or :code:`info
chains/my-run`.
If you specify several folders (or set of files), a comparison
will be performed.<++>
<**>--minimal<**> : None
<++>use this flag to avoid computing the posterior
distribution.<++> This will decrease the time needed for the
analysis, especially when analyzing big folders.<++>
<**>--bins<**> : int
<++>number of bins in the histograms<++> used to derive posterior
probabilities and credible intervals (default to 20). Decrease this
number for smoother plots at the expense of masking details.<++>
<**>--no-mean<**> : None
<++>remove the mean likelihood from the plot<++>. By default, when
plotting marginalised 1D posteriors, the code also shows the mean
likelihood per bin with dashed lines; this flag switches off the
dashed lines.<++>
<**>--short-title-1d<**> : None
<++>short 1D plot titles<++>. Remove mean and confidence limits above each 1D plot.<++>
<**>--extra<**> : str
<++>extra file to customize the output plots<++>. You can actually
set all the possible options in this file, including line-width,
ticknumber, ticksize, etc... You can specify four fields,
`info.redefine` (dict with keys set to the previous variable, and
the value set to a numerical computation that should replace this
variable), `info.to_change` (dict with keys set to the old variable
name, and value set to the new variable name), `info.to_plot` (list
of variables with new names to plot), and `info.new_scales` (dict
with keys set to the new variable names, and values set to the
number by which it should be multiplied in the graph).<++> For
instance,
.. code::
info.to_change={'oldname1':'newname1','oldname2':'newname2',...}
info.to_plot=['name1','name2','newname3',...]
info.new_scales={'name1':number1,'name2':number2,...}
<**>--noplot<**> : bool
<++>do not produce any plot, simply compute the posterior<++>
(*OPT*) (flag)<++>
<**>--noplot-2d<**> : bool
<++>produce only the 1d posterior plot<++> (*OPT*) (flag)<++>
<**>--contours-only<**> : bool
<++>do not fill the contours on the 2d plots<++> (*OPT*) (flag)<++>
<**>--all<**> : None
<++>output every subplot and data in separate files<++> (*OPT*)
(flag)<++>
<**>--ext<**> : str
<++>change the extension for the output file. Any extension handled
by :code:`matplotlib` can be used<++>. (`pdf` (default), `png`
(faster))<++>
<**>--num-columns-1d<**> : int
<++>for 1d plot, number of plots per horizontal row; if 'None' this is set automatically<++> (trying to approach a square plot).<++>
<**>--fontsize<**> : int
<++>desired fontsize<++> (default to 16)<++>
<**>--ticksize<**> : int
<++>desired ticksize<++> (default to 14)<++>
<**>--line-width<**> : int
<++>set line width<++> (default to 4)<++>
<**>--decimal<**> : int
<++>number of decimal places on ticks<++> (default to 3)<++>
<**>--ticknumber<**> : int
<++>number of ticks on each axis<++> (default to 3)<++>
<**>--legend-style<**> : str
<++>specify the style of the legend<++>, to choose from `sides` or
`top`.<++>
<**>--keep-non-markovian<**> : bool
<++>Use this flag to keep the non-markovian part of the chains produced
at the beginning of runs with --update mode<++>
This option is only relevant when the chains were produced with --update (*OPT*) (flag)<++>
<**>--keep-fraction<**> : float
<++>after burn-in removal, analyze only last fraction of each chain.<++>
(between 0 and 1). Normally one would not use this for runs with --update mode,
unless --keep-non-markovian is switched on (*OPT*)<++>
<**>--want-covmat<**> : bool
<++>calculate the covariance matrix when analyzing the chains.<++>
Warning: this will interfere with ongoing runs utilizing update mode (*OPT*) (flag)<++>
<**>--gaussian-smoothing<**> : float
<++>width of gaussian smoothing for plotting posteriors<++>,
in units of bin size, increase for smoother data<++>
<**>--interpolation-smoothing<**> : float
<++>interpolation factor for plotting posteriors<++>,
1 means no interpolation, increase for smoother curves<++>
<**>--posterior-smoothing<**> : int
<++>smoothing scheme for 1d posteriors<++>,
0 means no smoothing, 1 means cubic interpolation, higher means fitting ln(L) with polynomial of order n<++>
Returns
-------
args : NameSpace
parsed input arguments
"""
helpdict = create_parser.helpdict
# Customized usage, for more verbosity concerning these subparsers options.
usage = """%(prog)s [-h] [--version] {run,info} ... """
usage += tw.dedent("""\n
For more help on each of the subcommands, type:
%(prog)s run -h
%(prog)s info -h\n\n""")
# parser = ap.ArgumentParser(
#parser = MpArgumentParser(
#formatter_class=ap.ArgumentDefaultsHelpFormatter,
#description='Monte Python, a Monte Carlo code in Python',
#usage=usage)
parser = initialise_parser(
description='Monte Python, a Monte Carlo code in Python', usage=usage)
# -- add the subparsers
subparser = parser.add_subparsers(dest='subparser_name')
###############
# run the MCMC
runparser = add_subparser(subparser, 'run', help="run the MCMC chains")
# -- number of steps (OPTIONAL)
runparser.add_argument('-N', help=helpdict['N'], type=positive_int,
dest='N')
# -- output folder (OBLIGATORY)
runparser.add_argument('-o', '--output', help=helpdict['o'], type=str,
dest='folder')
# -- parameter file (OBLIGATORY)
runparser.add_argument('-p', '--param', help=helpdict['p'],
type=existing_file, dest='param')
# -- covariance matrix (OPTIONAL)
runparser.add_argument('-c', '--covmat', help=helpdict['c'],
type=existing_file, dest='cov')
# -- jumping method (OPTIONAL)
runparser.add_argument('-j', '--jumping', help=helpdict['j'],
dest='jumping', default='fast',
choices=['global', 'sequential', 'fast'])
# -- sampling method (OPTIONAL)
runparser.add_argument('-m', '--method', help=helpdict['m'],
dest='method', default='MH',
choices=['MH', 'NS', 'CH', 'IS', 'Der'])
# -- update Metropolis Hastings (OPTIONAL)
runparser.add_argument('--update', help=helpdict['update'], type=int,
default=0)
# -- jumping factor (OPTIONAL)
runparser.add_argument('-f', help=helpdict['f'], type=float,
dest='jumping_factor', default=2.4)
# -- fisher (EXPERIMENTAL)
runparser.add_argument('--fisher', help=helpdict['fisher'],
action='store_true')
# -- configuration file (OPTIONAL)
runparser.add_argument('--conf', help=helpdict['conf'],
type=str, dest='config_file',
default='default.conf')
# -- arbitrary numbering of an output chain (OPTIONAL)
runparser.add_argument('--chain-number', help=helpdict['chain-number'])
# -- stop run after first successful update using --update (EXPERIMENTAL)
runparser.add_argument('--stop-after-update', help=helpdict['stop-after-update'],
dest='stop_after_update', action='store_true')
# display option
runparser.add_argument('--display-each-chi2', help=helpdict['display-each-chi2'],
dest='display_each_chi2', action='store_true')
###############
# MCMC restart from chain or best fit file
runparser.add_argument('-r', help=helpdict['r'],
type=existing_file, dest='restart')
runparser.add_argument('-b', '--bestfit', dest='bf', help=helpdict['b'],
type=existing_file)
###############
# Silence the output (no print on the console)
runparser.add_argument('--silent', help=helpdict['silent'],
action='store_true')
###############
# Adding new derived parameters to a run
runparser.add_argument(
'--Der-target-folder', dest="Der_target_folder",
help=helpdict['Der-target-folder'], type=str, default='')
runparser.add_argument(
'--Der-param-list', dest='derived_parameters',
help=helpdict['Der-param-list'], type=str, default='', nargs='+')
###############
# Importance Sampling Arguments
runparser.add_argument(
'--IS-starting-folder', dest='IS_starting_folder',
help=helpdict['IS-starting-folder'], type=str, default='', nargs='+')
###############
# MultiNest arguments (all OPTIONAL and ignored if not "-m=NS")
# The default values of -1 mean to take the PyMultiNest default values
try:
from nested_sampling import NS_prefix, NS_user_arguments
NSparser = runparser.add_argument_group(
title="MultiNest",
description="Run the MCMC chains using MultiNest"
)
for arg in NS_user_arguments:
NSparser.add_argument('--'+NS_prefix+arg,
default=-1,
**NS_user_arguments[arg])
except ImportError:
# Not defined if not installed
pass
###############
# CosmoHammer arguments (all OPTIONAL and ignored if not "-m=CH")
# The default values of -1 mean to take the CosmoHammer default values
try:
from cosmo_hammer import CH_prefix, CH_user_arguments
CHparser = runparser.add_argument_group(
title="CosmoHammer",
description="Run the MCMC chains using the CosmoHammer framework")
for arg in CH_user_arguments:
CHparser.add_argument('--'+CH_prefix+arg,
default=-1,
**CH_user_arguments[arg])
except ImportError:
# Not defined if not installed
pass
###############
# Information
infoparser = add_subparser(subparser, 'info',
help="analyze the MCMC chains")
# -- folder to analyze
infoparser.add_argument('files', help=helpdict['files'],
nargs='+')
# Silence the output (no print on the console)
infoparser.add_argument('--silent', help=helpdict['silent'],
action='store_true')
# -- to only write the covmat and bestfit, without computing the posterior
infoparser.add_argument('--minimal', help=helpdict['minimal'],
action='store_true')
# -- number of bins (defaulting to 20)
infoparser.add_argument('--bins', help=helpdict['bins'],
type=int, default=20)
# -- to remove the mean-likelihood line
infoparser.add_argument('--no-mean', help=helpdict['no-mean'],
dest='mean_likelihood', action='store_false')
# -- to remove the mean and 68% limits on top of each 1D plot
infoparser.add_argument('--short-title-1d', help=helpdict['short-title-1d'],
dest='short_title_1d', action='store_true')
# -- possible plot file describing custom commands
infoparser.add_argument('--extra', help=helpdict['extra'],
dest='optional_plot_file', default='')
# -- if you just want the covariance matrix, use this option
infoparser.add_argument('--noplot', help=helpdict['noplot'],
dest='plot', action='store_false')
# -- if you just want to output 1d posterior distributions (faster)
infoparser.add_argument('--noplot-2d', help=helpdict['noplot-2d'],
dest='plot_2d', action='store_false')
# -- when plotting 2d posterior distributions, use contour lines instead of
# filled contours (might be useful when comparing several folders)
infoparser.add_argument('--contours-only', help=helpdict['contours-only'],
dest='contours_only', action='store_true')
# -- if you want to output every single subplots
infoparser.add_argument('--all', help=helpdict['all'], dest='subplot',
action='store_true')
# -- to change the extension used to output files (pdf is the default one,
# but takes long, valid options are png and eps)
infoparser.add_argument('--ext', help=helpdict['ext'],
type=str, dest='extension', default='pdf')
# -- to set manually the number of plots per horizontal row in 1d plot
infoparser.add_argument('--num-columns-1d', help=helpdict['num-columns-1d'],
type=int, dest='num_columns_1d')
# -- only analyze the markovian part of the chains
infoparser.add_argument('--keep-non-markovian', help=helpdict['keep-non-markovian'],
dest='markovian', action='store_false')
# -- fraction of chains to be analyzed after burn-in removal (defaulting to 1.0)
infoparser.add_argument('--keep-fraction', help=helpdict['keep-fraction'],
type=float, dest='keep_fraction', default=1.0)
# -- calculate the covariance matrix when analyzing the chains
infoparser.add_argument('--want-covmat', help=helpdict['want-covmat'],
dest='want_covmat', action='store_true')
# -------------------------------------
# Further customization
# -- fontsize of plots (defaulting to 16)
infoparser.add_argument('--fontsize', help=helpdict['fontsize'],
type=int, default=16)
# -- ticksize of plots (defaulting to 14)
infoparser.add_argument('--ticksize', help=helpdict['ticksize'],
type=int, default=14)
# -- linewidth of 1d plots (defaulting to 4, 2 being a bare minimum for
# legible graphs)
infoparser.add_argument('--line-width', help=helpdict['line-width'],
type=int, default=4)
# -- number of decimal places that appear on the tick legend. If you want
# to increase the number of ticks, you should reduce this number
infoparser.add_argument('--decimal', help=helpdict['decimal'], type=int,
default=3)
# -- number of ticks that appear on the graph.
infoparser.add_argument('--ticknumber', help=helpdict['ticknumber'],
type=int, default=3)
# -- legend type, to choose between top (previous style) to sides (new
# style). It modifies the place where the name of the variable appear.
infoparser.add_argument('--legend-style', help=helpdict['legend-style'],
type=str, choices=['sides', 'top'],
default='sides')
# -- width of gaussian smoothing for plotting posteriors,
# in units of bin size, increase for smoother data.
infoparser.add_argument('--gaussian-smoothing', help=helpdict['gaussian-smoothing'],
type=float, default=0.5)
# interpolation factor for plotting posteriors, 1 means no interpolation,
# increase for smoother curves (it means that extra bins are created
# and interpolated between computed bins)
infoparser.add_argument('--interpolation-smoothing', help=helpdict['interpolation-smoothing'],
type=int, default=4)
infoparser.add_argument('--posterior-smoothing', help=helpdict['posterior-smoothing'],
type=int, default=5)
return parser
def parse(custom_command=''):
"""
Check some basic organization of the folder, and exit the program in case
something goes wrong.
Keyword Arguments
-----------------
custom_command : str
For testing purposes, instead of reading the command line argument,
read instead the given string. It should omit the start of the
command, so e.g.: '-N 10 -o toto/'
"""
# Create the parser
parser = create_parser()
# Recover all command line arguments in the args dictionary, except when
# testing, where the custom_command string is read instead.
# Note that safe_parse_args is used instead of parse_args. It
# is a function defined in this file to allow for a default subparser.
if not custom_command:
args = parser.safe_parse_args()
else:
args = parser.safe_parse_args(custom_command.split(' '))
# check for MPI
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
except ImportError:
# set all chains to master if no MPI
rank = 0
# Some check to perform when running the MCMC chains is requested
if args.subparser_name == "run":
# If the user wants to start over from an existing chain, the program
# will use automatically the same folder, and the log.param in it
if args.restart is not None:
args.folder = os.path.sep.join(
args.restart.split(os.path.sep)[:-1])
args.param = os.path.join(args.folder, 'log.param')
if not args.silent:
warnings.warn(
"Restarting from %s." % args.restart +
" Using associated log.param.")
# Else, the user should provide an output folder
else:
if args.folder is None:
raise io_mp.ConfigurationError(
"You must provide an output folder, because you do not " +
"want your main folder to look dirty, do you ?")
# and if the folder already exists, and that no parameter file was
# provided, use the log.param
if os.path.isdir(args.folder):
if os.path.exists(
os.path.join(args.folder, 'log.param')):
# if the log.param exists and a parameter file was
# provided, use the log.param instead and notify the
# user.
old_param = args.param
args.param = os.path.join(
args.folder, 'log.param')
if old_param is not None:
if not args.silent and not rank:
warnings.warn(
"Appending to an existing folder: using the "
"log.param instead of %s" % old_param)
else:
if args.param is None:
raise io_mp.ConfigurationError(
"The requested output folder seems empty. "
"You must then provide a parameter file (command"
" line option -p any.param)")
else:
if args.param is None:
raise io_mp.ConfigurationError(
"The requested output folder appears to be non "
"existent. You must then provide a parameter file "
"(command line option -p any.param)")
return args
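# Hedged usage sketch (not in the original module), following the
# custom_command convention documented above; paths are illustrative.
# Note that parse() also checks that the output folder or parameter file
# exists, so a real call needs e.g. an existing input/test.param:
#   args = parse(custom_command='-N 10 -o chains/example -p input/test.param')
#   args.N, args.folder, args.param  # -> 10, 'chains/example', 'input/test.param'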
| miguelzuma/montepython_zuma | montepython/parser_mp.py | Python | mit | 36,893 | ["Gaussian"] | 7fc9f3d3a879966ef5b14da3181a900527e8d3428c58a4e5c56b3cd6b1e13759 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.dllib.utils.common import get_node_and_core_number
from bigdl.dllib.nncontext import init_nncontext
from bigdl.orca import OrcaContext
from bigdl.orca.data import SparkXShards
from bigdl.orca.data.utils import *
def read_csv(file_path, **kwargs):
"""
Read csv files to SparkXShards of pandas DataFrames.
:param file_path: A csv file path, a list of multiple csv file paths, or a directory
containing csv files. Local file system, HDFS, and AWS S3 are supported.
:param kwargs: You can specify read_csv options supported by pandas.
:return: An instance of SparkXShards.
"""
return read_file_spark(file_path, "csv", **kwargs)
def read_json(file_path, **kwargs):
"""
Read json files to SparkXShards of pandas DataFrames.
:param file_path: A json file path, a list of multiple json file paths, or a directory
containing json files. Local file system, HDFS, and AWS S3 are supported.
:param kwargs: You can specify read_json options supported by pandas.
:return: An instance of SparkXShards.
"""
return read_file_spark(file_path, "json", **kwargs)
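# Hedged usage sketch (not part of the original module); the import path
# mirrors this file's location and the data path is illustrative. Any pandas
# read_csv keyword can be forwarded through **kwargs.
# from bigdl.orca.data.pandas import read_csv
# shards = read_csv("hdfs://namenode:9000/data/sales-2020.csv", sep=",", header=0)
# 'shards' is an instance of SparkXShards of pandas DataFrames, as documented above.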
def read_file_spark(file_path, file_type, **kwargs):
sc = init_nncontext()
node_num, core_num = get_node_and_core_number()
backend = OrcaContext.pandas_read_backend
if backend == "pandas":
file_url_splits = file_path.split("://")
prefix = file_url_splits[0]
file_paths = []
if isinstance(file_path, list):
[file_paths.extend(extract_one_path(path, os.environ)) for path in file_path]
else:
file_paths = extract_one_path(file_path, os.environ)
if not file_paths:
raise Exception("The file path is invalid or empty, please check your data")
num_files = len(file_paths)
total_cores = node_num * core_num
num_partitions = num_files if num_files < total_cores else total_cores
rdd = sc.parallelize(file_paths, num_partitions)
if prefix == "hdfs":
pd_rdd = rdd.mapPartitions(
lambda iter: read_pd_hdfs_file_list(iter, file_type, **kwargs))
elif prefix == "s3":
pd_rdd = rdd.mapPartitions(
lambda iter: read_pd_s3_file_list(iter, file_type, **kwargs))
else:
def loadFile(iterator):
dfs = []
for x in iterator:
df = read_pd_file(x, file_type, **kwargs)
dfs.append(df)
import pandas as pd
return [pd.concat(dfs)]
pd_rdd = rdd.mapPartitions(loadFile)
else: # Spark backend; spark.read.csv/json accepts a folder path as input
assert file_type == "json" or file_type == "csv", \
"Unsupported file type: %s. Only csv and json files are supported for now" % file_type
spark = OrcaContext.get_spark_session()
# TODO: add S3 credentials
# The following implementation is adapted from
# https://github.com/databricks/koalas/blob/master/databricks/koalas/namespace.py
# with some modifications.
if "mangle_dupe_cols" in kwargs:
assert kwargs["mangle_dupe_cols"], "mangle_dupe_cols can only be True"
kwargs.pop("mangle_dupe_cols")
if "parse_dates" in kwargs:
assert not kwargs["parse_dates"], "parse_dates can only be False"
kwargs.pop("parse_dates")
names = kwargs.get("names", None)
if "names" in kwargs:
kwargs.pop("names")
usecols = kwargs.get("usecols", None)
if "usecols" in kwargs:
kwargs.pop("usecols")
dtype = kwargs.get("dtype", None)
if "dtype" in kwargs:
kwargs.pop("dtype")
squeeze = kwargs.get("squeeze", False)
if "squeeze" in kwargs:
kwargs.pop("squeeze")
index_col = kwargs.get("index_col", None)
if "index_col" in kwargs:
kwargs.pop("index_col")
if file_type == "csv":
# Handle pandas-compatible keyword arguments
kwargs["inferSchema"] = True
header = kwargs.get("header", "infer")
if isinstance(names, str):
kwargs["schema"] = names
if header == "infer":
header = 0 if names is None else None
if header == 0:
kwargs["header"] = True
elif header is None:
kwargs["header"] = False
else:
raise ValueError("Unknown header argument {}".format(header))
if "quotechar" in kwargs:
quotechar = kwargs["quotechar"]
kwargs.pop("quotechar")
kwargs["quote"] = quotechar
if "escapechar" in kwargs:
escapechar = kwargs["escapechar"]
kwargs.pop("escapechar")
kwargs["escape"] = escapechar
# sep and comment are the same as pandas
if "comment" in kwargs:
comment = kwargs["comment"]
if not isinstance(comment, str) or len(comment) != 1:
raise ValueError("Only length-1 comment characters supported")
df = spark.read.csv(file_path, **kwargs)
if header is None:
df = df.selectExpr(
*["`%s` as `%s`" % (field.name, i) for i, field in enumerate(df.schema)])
else:
df = spark.read.json(file_path, **kwargs)
# Handle pandas-compatible postprocessing arguments
if usecols is not None and not callable(usecols):
usecols = list(usecols)
renamed = False
if isinstance(names, list):
if len(set(names)) != len(names):
raise ValueError("Found duplicate names, please check your names input")
if usecols is not None:
if not callable(usecols):
# usecols is list
if len(names) != len(usecols) and len(names) != len(df.schema):
raise ValueError(
"Passed names did not match usecols"
)
if len(names) == len(df.schema):
df = df.selectExpr(
*["`%s` as `%s`" % (field.name, name) for field, name
in zip(df.schema, names)]
)
renamed = True
else:
if len(names) != len(df.schema):
raise ValueError(
"The number of names [%s] does not match the number "
"of columns [%d]. Try names by a Spark SQL DDL-formatted "
"string." % (len(names), len(df.schema))
)
df = df.selectExpr(
*["`%s` as `%s`" % (field.name, name) for field, name
in zip(df.schema, names)]
)
renamed = True
index_map = dict([(i, field.name) for i, field in enumerate(df.schema)])
if usecols is not None:
if callable(usecols):
cols = [field.name for field in df.schema if usecols(field.name)]
missing = []
elif all(isinstance(col, int) for col in usecols):
cols = [field.name for i, field in enumerate(df.schema) if i in usecols]
missing = [
col
for col in usecols
if col >= len(df.schema) or df.schema[col].name not in cols
]
elif all(isinstance(col, str) for col in usecols):
cols = [field.name for field in df.schema if field.name in usecols]
if isinstance(names, list):
missing = [c for c in usecols if c not in names]
else:
missing = [col for col in usecols if col not in cols]
else:
raise ValueError(
"usecols must only be list-like of all strings, "
"all unicode, all integers or a callable.")
if len(missing) > 0:
raise ValueError(
"usecols do not match columns, columns expected but not found: %s" % missing)
if len(cols) > 0:
df = df.select(cols)
if isinstance(names, list):
if not renamed:
df = df.selectExpr(
*["`%s` as `%s`" % (col, name) for col, name in zip(cols, names)]
)
# update index map after rename
for index, col in index_map.items():
if col in cols:
index_map[index] = names[cols.index(col)]
if df.rdd.getNumPartitions() < node_num:
df = df.repartition(node_num)
def to_pandas(columns, squeeze=False, index_col=None):
def f(iter):
import pandas as pd
data = list(iter)
pd_df = pd.DataFrame(data, columns=columns)
if dtype is not None:
if isinstance(dtype, dict):
for col, type in dtype.items():
if isinstance(col, str):
if col not in pd_df.columns:
raise ValueError("column to be set type is not"
" in current dataframe")
pd_df[col] = pd_df[col].astype(type)
elif isinstance(col, int):
if index_map[col] not in pd_df.columns:
raise ValueError("column index to be set type is not"
" in current dataframe")
pd_df[index_map[col]] = pd_df[index_map[col]].astype(type)
else:
pd_df = pd_df.astype(dtype)
if squeeze and len(pd_df.columns) == 1:
pd_df = pd_df.iloc[:, 0]
if index_col:
pd_df = pd_df.set_index(index_col)
return [pd_df]
return f
pd_rdd = df.rdd.mapPartitions(to_pandas(df.columns, squeeze, index_col))
try:
data_shards = SparkXShards(pd_rdd)
except Exception as e:
alternative_backend = "pandas" if backend == "spark" else "spark"
print("An error occurred when reading files with '%s' backend, you may switch to '%s' "
"backend for another try. You can set the backend using "
"OrcaContext.pandas_read_backend" % (backend, alternative_backend))
raise e
return data_shards
def read_parquet(file_path, columns=None, schema=None, **options):
"""
Read parquet files to SparkXShards of pandas DataFrames.
:param file_path: Parquet file path, a list of multiple parquet file paths, or a directory
containing parquet files. Local file system, HDFS, and AWS S3 are supported.
:param columns: list of column name, default=None.
If not None, only these columns will be read from the file.
:param schema: pyspark.sql.types.StructType for the input schema or
a DDL-formatted string (For example col0 INT, col1 DOUBLE).
:param options: other options for reading parquet.
:return: An instance of SparkXShards.
"""
sc = init_nncontext()
spark = OrcaContext.get_spark_session()
# df = spark.read.parquet(file_path)
df = spark.read.load(file_path, "parquet", schema=schema, **options)
if columns:
df = df.select(*columns)
def to_pandas(columns):
def f(iter):
import pandas as pd
data = list(iter)
pd_df = pd.DataFrame(data, columns=columns)
return [pd_df]
return f
pd_rdd = df.rdd.mapPartitions(to_pandas(df.columns))
try:
data_shards = SparkXShards(pd_rdd)
except Exception as e:
print("An error occurred when reading parquet files")
raise e
return data_shards
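# Hedged usage sketch (not part of the original module); the path and column
# names are illustrative.
# shards = read_parquet("s3://bucket/events.parquet", columns=["user_id", "ts"])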
| intel-analytics/BigDL | python/orca/src/bigdl/orca/data/pandas/preprocessing.py | Python | apache-2.0 | 12,841 | ["ORCA"] | 583e1266a961d721190c68489250948b72e6b34cee939997f1da6a0dd3ff23af |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace for operators used in Gluon dispatched by F=ndarray."""
from ...context import current_context
from ..numpy import _internal as _npi
__all__ = ['bernoulli', 'normal_n', 'uniform_n']
def bernoulli(prob, logit, size, dtype, ctx, out):
"""Creates a Bernoulli distribution parameterized by :attr:`prob`
or :attr:`logit` (but not both).
Samples are binary (0 or 1). They take the value `1` with probability `p`
and `0` with probability `1 - p`.
Parameters
----------
prob : float, ndarray
The probability of sampling '1'.
Only one of prob or logit should be passed in.
logit : float, ndarray
The log-odds of sampling '1'.
Only one of prob or logit should be passed in.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
dtype : dtype, optional
Desired dtype of the result. All dtypes are determined by their
name, i.e., 'int64', 'int', etc, so byteorder is not available
and a specific precision may have different C types depending
on the platform. The default value is 'np.float32'.
ctx : Context, optional
Device context of output. Default is current context.
out : symbol, optional
The output symbol (default is `None`).
Returns
-------
out : ndarray
Drawn samples from the parameterized bernoulli distribution.
Examples
--------
>>> prob = np.random.uniform(size=(4,4))
>>> logit = np.log(prob) - np.log(1 - prob)
>>> npx.random.bernoulli(logit=logit)
array([[0., 1., 1., 1.],
[0., 1., 1., 1.],
[0., 1., 0., 0.],
[1., 0., 1., 0.]])
>>> npx.random.bernoulli(prob=prob)
array([[0., 1., 0., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 0.],
[1., 0., 1., 0.]])
"""
from ...numpy import ndarray as np_ndarray
tensor_type_name = np_ndarray
if (prob is None) == (logit is None):
raise ValueError(
"Either `prob` or `logit` must be specified, but not both. " +
"Received prob={}, logit={}".format(prob, logit))
if dtype is None:
dtype = 'float32'
if ctx is None:
ctx = current_context()
if size == ():
size = None
if prob is not None:
is_tensor = isinstance(prob, tensor_type_name)
if is_tensor:
return _npi.bernoulli(prob, prob=None, logit=None, is_logit=False,
size=size, ctx=ctx, dtype=dtype, out=out)
else:
return _npi.bernoulli(prob=prob, logit=None, is_logit=False,
size=size, ctx=ctx, dtype=dtype, out=out)
else:
is_tensor = isinstance(logit, tensor_type_name)
if is_tensor:
return _npi.bernoulli(logit, prob=None, logit=None, is_logit=True,
size=size, ctx=ctx, dtype=dtype, out=out)
else:
return _npi.bernoulli(prob=None, logit=logit, is_logit=True,
size=size, ctx=ctx, dtype=dtype, out=out)
def uniform_n(low=0.0, high=1.0, batch_shape=None, dtype=None, ctx=None):
r"""Draw samples from a uniform distribution.
Samples are uniformly distributed over the half-open interval
``[low, high)`` (includes low, but excludes high). In other words,
any value within the given interval is equally likely to be drawn
by `uniform`.
Parameters
----------
low : float, ndarray, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float, ndarray, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
batch_shape : int or tuple of ints, optional
Batch shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k * broadcast(low, high).size`` samples are drawn.
If size is ``None`` (default),
a scalar tensor containing a single value is returned if
``low`` and ``high`` are both scalars. Otherwise,
``np.broadcast(low, high).size`` samples are drawn.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
out : ndarray
Drawn samples from the parameterized uniform distribution.
See Also
--------
randint : Discrete uniform distribution, yielding integers.
rand : Convenience function that accepts dimensions as input, e.g.,
``rand(2,2)`` would generate a 2-by-2 array of floats,
uniformly distributed over ``[0, 1)``.
Notes
-----
The probability density function of the uniform distribution is
.. math:: p(x) = \frac{1}{b - a}
anywhere within the interval ``[a, b)``, and zero elsewhere.
When ``high`` == ``low``, values of ``low`` will be returned.
If ``high`` < ``low``, the results are officially undefined
and may eventually raise an error, i.e. do not rely on this
function to behave when passed arguments satisfying that
inequality condition.
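    Examples
    --------
    A minimal illustrative sketch (this assumes the function is exposed as
    ``npx.random.uniform_n``; the drawn values are random, so no output is
    shown here):
    >>> npx.random.uniform_n(low=0, high=10, batch_shape=(2, 3))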
"""
from ...numpy import ndarray as np_ndarray
input_type = (isinstance(low, np_ndarray), isinstance(high, np_ndarray))
if dtype is None:
dtype = 'float32'
if ctx is None:
ctx = current_context()
if batch_shape == ():
batch_shape = None
if input_type == (True, True):
return _npi.uniform_n(low, high, low=None, high=None, size=batch_shape,
ctx=ctx, dtype=dtype)
elif input_type == (False, True):
return _npi.uniform_n(high, low=low, high=None, size=batch_shape,
ctx=ctx, dtype=dtype)
elif input_type == (True, False):
return _npi.uniform_n(low, low=None, high=high, size=batch_shape,
ctx=ctx, dtype=dtype)
else:
return _npi.uniform_n(low=low, high=high, size=batch_shape,
ctx=ctx, dtype=dtype)
def normal_n(loc=0.0, scale=1.0, batch_shape=None, dtype=None, ctx=None):
r"""Draw random samples from a normal (Gaussian) distribution.
Samples are distributed according to a normal distribution parametrized
by *loc* (mean) and *scale* (standard deviation).
Parameters
----------
loc : float, optional
Mean (centre) of the distribution.
scale : float, optional
Standard deviation (spread or "width") of the distribution.
batch_shape : int or tuple of ints, optional
Batch shape. If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k * broadcast(loc, scale).size`` samples are drawn.
        If batch_shape is ``None`` (default),
        a scalar tensor containing a single value is returned if
        ``loc`` and ``scale`` are both scalars. Otherwise,
``np.broadcast(loc, scale).size`` samples are drawn.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output, default is current context.
Returns
-------
out : ndarray
Drawn samples from the parameterized normal distribution.
Notes
-----
The probability density for the Gaussian distribution is
.. math:: p(x) = \frac{1}{\sqrt{ 2 \pi \sigma^2 }}
e^{ - \frac{ (x - \mu)^2 } {2 \sigma^2} },
where :math:`\mu` is the mean and :math:`\sigma` the standard
deviation. The square of the standard deviation, :math:`\sigma^2`,
is called the variance.
The function has its peak at the mean, and its "spread" increases with
the standard deviation (the function reaches 0.607 times its maximum at
:math:`x + \sigma` and :math:`x - \sigma` [2]_). This implies that
`numpy.random.normal` is more likely to return samples lying close to
the mean, rather than those far away.
References
----------
.. [1] Wikipedia, "Normal distribution",
https://en.wikipedia.org/wiki/Normal_distribution
.. [2] P. R. Peebles Jr., "Central Limit Theorem" in "Probability,
Random Variables and Random Signal Principles", 4th ed., 2001,
pp. 51, 51, 125.
Examples
--------
>>> mu, sigma = 0, 0.1 # mean and standard deviation
>>> s = np.random.normal(mu, sigma, 1000)
Verify the mean and the variance:
>>> np.abs(mu - np.mean(s)) < 0.01
array(True)
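    A batched draw through this function itself might look like the sketch
    below (it assumes the function is exposed as ``npx.random.normal_n``;
    values are random, so no output is shown):
    >>> npx.random.normal_n(mu, sigma, batch_shape=(2, 3))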
"""
from ...numpy import ndarray as np_ndarray
input_type = (isinstance(loc, np_ndarray), isinstance(scale, np_ndarray))
if dtype is None:
dtype = 'float32'
if ctx is None:
ctx = current_context()
if batch_shape == ():
batch_shape = None
if input_type == (True, True):
return _npi.normal_n(loc, scale, loc=None, scale=None, size=batch_shape,
ctx=ctx, dtype=dtype)
elif input_type == (False, True):
return _npi.normal_n(scale, loc=loc, scale=None, size=batch_shape,
ctx=ctx, dtype=dtype)
elif input_type == (True, False):
return _npi.normal_n(loc, loc=None, scale=scale, size=batch_shape,
ctx=ctx, dtype=dtype)
else:
return _npi.normal_n(loc=loc, scale=scale, size=batch_shape,
ctx=ctx, dtype=dtype)
|
larroy/mxnet
|
python/mxnet/ndarray/numpy_extension/random.py
|
Python
|
apache-2.0
| 10,473
|
[
"Gaussian"
] |
4ce3ff8603415e0c4a15064b408529e6b34c508dac0ec59905b6f9bd71981a62
|
# classifier - classification algorithms for Bregman toolkit
__version__ = '1.0'
__author__ = 'Michael A. Casey'
__copyright__ = "Copyright (C) 2010 Michael Casey, Dartmouth College, All Rights Reserved"
__license__ = "GPL Version 2.0 or Higher"
__email__ = 'mcasey@dartmouth.edu'
import numpy as N
import scipy.linalg
from random import random
class ClassifierError(Exception):
pass
class Classifier:
"""
::
Base class for supervised and unsupervised classifiers
"""
def __init__(self, num_classes, max_iter=200, error_thresh=0.001, dist_fun='Euc'):
self.num_classes=num_classes
self.max_iter = max_iter
self.error_thresh = error_thresh
self.dist_fun = dist_fun
self.M = None
self.dists = None
self.verbosity = 0
def _initialize(self):
pass
def train(self, X, labels=None, reset=True):
pass
def classify(self, Y, labels=None):
pass
class KMeans(Classifier):
"""
::
Unsupervised classification using k-means and random initialization
km = KMeans(num_classes, max_iter, error_thresh, dist_fun)
num_classes - number of clusters to estimate
max_iter - maximum number of iterations for training
error_thresh - threshold for sum-square-differences of old/new means
dist_fun - future parameter, to allow alternate metrics
returns a new KMeans instance
training:
assigns = train(X)
X - numpy ndarray observation matrix, n x d, n observations, d dimensions
after training:
assigns = classify(X)
X - numpy ndarray observation matrix, n x d, n observations, d dimensions
self.M - the trained means
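        example (an illustrative sketch, not part of the original source):
            km = KMeans(3)
            assigns = km.train(N.random.randn(100, 2))
            new_assigns = km.classify(N.random.randn(10, 2))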
"""
def __init__(self, num_classes, max_iter=200, error_thresh=0.001, dist_fun='Euc'):
        Classifier.__init__(self, num_classes, max_iter=max_iter, error_thresh=error_thresh, dist_fun=dist_fun)
def _initialize(self, data):
"""
::
Initialize clusters semi-deterministically from data
"""
#Cov = N.diag( N.diag( N.cov(data, rowvar=0) ) )
k = self.num_classes
#self.M = N.zeros((1,data.shape[1]))
#self.M = N.dot(N.random.randn(k, data.shape[1]) , scipy.linalg.sqrtm(Cov).real) + N.dot(N.ones((k,1)) , data.mean(0).reshape(1,-1))
self.M = N.random.randn(k, data.shape[1])
def train(self, X, labels=None, reset=True):
"""
::
Train the classifier using the data passed in X.
X is a row-wise observation matrix with variates in the columns
and observations in the rows.
If reset=True (default) means will be re-initialized from data.
"""
self.X = X
rw,cl = X.shape
if reset:
self._initialize(X)
for i in range(self.max_iter):
assignments = self.classify(self.X)
sse = self._update_means(assignments)
if sse < self.error_thresh:
break
return assignments
def _update_means(self, assignments):
"""
::
Given the assignment vector, compute new means
"""
sse = N.zeros(self.num_classes)
old_means = self.M.copy()
empty_classes = []
for k in range(self.num_classes):
idx = N.where(assignments==k)[0]
if len(idx):
self.M[k,:] = self.X[idx,:].mean(0)
else:
empty_classes.append(k)
if len(empty_classes):
self.M = self.M[N.setdiff1d(range(self.num_classes),empty_classes),:]
old_means = old_means[N.setdiff1d(range(self.num_classes),empty_classes),:]
self.num_classes -= len(empty_classes)
sse = ((old_means - self.M)**2).sum()
return sse
def classify(self, Y, labels=None):
"""
::
Given a trained classifier, return the assignments to classes for matrix Y.
"""
self.dists = self._mtx_distance(self.M, Y)
assignments = self.dists.argmin(0)
return assignments
@staticmethod # FIX ME, use bregman.distance functions
def _mtx_distance(X,Y):
"""
::
matrix-matrix distances between matrix X and matrix Y.
Computes distances between every row of X and every row of Y
Inputs:
X, Y - a row-wise observation matrices
Output:
d (rX, rY), ndarray of squared distances between every row in X to every row in Y
"""
d = N.zeros((X.shape[0], Y.shape[0]))
for k in range(X.shape[0]):
d[k,:] = ((N.kron(X[k,:],N.ones((Y.shape[0],1))) - Y)**2).sum(1)
return d
class SoftKMeans(KMeans):
"""
::
Employ soft kmeans algorithm for unsupervised clustering
David MacKay,"Information Theory, Inference and Learning Algorithms", Cambridge, 2003
Chapter 22
Parameters:
beta - softness/stiffness [2.0]
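        example (an illustrative sketch, not part of the original source):
            skm = SoftKMeans(4, beta=2.0)
            assigns = skm.train(N.random.randn(200, 3))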
"""
def __init__(self, num_classes, max_iter=200, error_thresh=0.001, beta = 2.0, dist_fun='Euc'):
KMeans.__init__(self, num_classes, max_iter, error_thresh, dist_fun)
self.beta = beta
def _update_means(self, assignments):
"""
Override KMeans _update_means to perform soft kmeans assignments
"""
        resp = N.exp(-self.beta * self.dists)
        resp /= N.dot(N.ones((self.num_classes,1)), resp.sum(0).reshape(1,-1))
old_means = self.M.copy()
for k in range(self.num_classes):
            Xk = N.dot(resp[k,:].reshape(-1,1), N.ones((1,self.X.shape[1]))) * self.X
self.M[k,:] = Xk.sum(0) / resp[k,:].sum(0)
sse = ((old_means - self.M)**2).sum()
if self.verbosity:
print "sse = ", sse
return sse
class GaussianMulti(Classifier):
"""
::
Supervised classification using a multivariate Gaussian model per class.
Also known as a quadratic classifier (Therien 1989).
gm = GaussianMulti(num_classes, max_iter, error_thresh, dist_fun)
num_classes - number of clusters to estimate (required)
max_iter - maximum number of iterations for training [200]
error_thresh - threshold for sum-square-differences of old/new means [.001]
dist_fun - future parameter, to allow alternate metrics [bregman.distance.euc]
returns a new GaussianMulti instance
training:
assigns = train(X, labels)
X - numpy ndarray observation matrix, n x d, n observations, d dimensions
labels - per row labels for data in X, must be same length as rows of X
after training:
assigns = classify(X)
X - numpy ndarray observation matrix, n x d, n observations, d dimensions
returns labels for class assignments to rows in X
self.M - the trained means
self.C - the trained covariances
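        example (an illustrative sketch, not part of the original source;
        data is an n x d ndarray and labels a length-n label vector):
            gm = GaussianMulti(2)
            gm.train(data, labels)
            predicted, probs = gm.classify(data)
            accuracy = gm.evaluate(data, labels)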
"""
def __init__(self, num_classes, max_iter=200, error_thresh=0.001, dist_fun='Euc'):
Classifier.__init__(self, num_classes, max_iter, error_thresh, dist_fun)
def train(self, data, labels=None, reset=True ):
"""
::
myGM.train(data, labels)
Supervised classification for each unique label in labels using data.
Employs a multivariate Gaussian model per class.
self.M - per-class Gaussian means
self.C - per-class Gaussian covariance matrices
"""
if labels is None:
print "Supervised classifier needs labels to train."
raise ClassifierError()
num_observations = data.shape[0]
num_labels = labels.shape[0]
labs = N.lib.arraysetops.unique(labels) # in lexicographic order
self.labels = labs
if len(labs) != self.num_classes:
print "number of labels doesn't match number of classes in classifier instance"
raise ValueError()
self.M = N.zeros((len(labs),data.shape[1]))
self.C = N.zeros((len(labs),data.shape[1],data.shape[1]))
for k, c in enumerate(labs):
c_idx = N.where(labels==c)[0] # logical index for label c in data
self.M[k,:] = data[c_idx,:].mean(0)
self.C[k,:,:] = N.cov(data[c_idx,:],rowvar=0)
def classify(self, data, labels=None):
"""
::
labels = myGM.classify(data)
"""
probs = N.zeros((data.shape[0], self.num_classes))
for k in range(self.num_classes):
probs[:,k] = GaussianPDF(data, self.M[k,:], self.C[k,:,:])
probs[N.where(probs<0)]=0
assignments = N.argmax(probs,axis=1)
return self.labels[assignments], probs
def evaluate(self, data, labels):
"""
::
            Estimate predicted labels from data and compare them with the true labels.
Returns:
a - accuracy as a proportion: 0.0 - 1.0
"""
predicted_labels, p = self.classify(data)
return len(N.where(predicted_labels.reshape(-1,1) == labels.reshape(-1,1))[0]) / float(len(labels))
def classify_range(self, data, upper_bounds):
"""
::
Classify data in ranges with given upper_bounds.
The algorithm is a majority vote algorithm among the classes.
Returns:
a - assignments per upper_bound region
c - counts of assignments per class
"""
start = 0
assignments = N.zeros(len(upper_bounds))
predicted_counts = N.zeros((len(upper_bounds),self.num_classes))
predicted_labels, p = self.classify(data)
for i, stop in enumerate(upper_bounds):
for j, label in enumerate(self.labels):
predicted_counts[i,j] = len(N.where(predicted_labels[start:stop]==label)[0])
assignments[i] = self.labels[N.argmax(predicted_counts[i,:])]
start = stop
return assignments, predicted_counts
def evaluate_range(self, data, true_labels, upper_bounds):
"""
::
Perform assignment aggregation within data ranges by majority vote.
The maximum count among the K classes wins per range.
In case of a tie, randomly select among tied classes.
Returns:
a - accuracy as a proportion: 0.0 - 1.0
e - vector of True/False per range
"""
start = 0
evaluation = N.zeros(len(upper_bounds), dtype='bool')
predicted_counts = N.zeros(self.num_classes)
true_counts = N.zeros(self.num_classes)
predicted_labels, p = self.classify(data)
for i, stop in enumerate(upper_bounds):
for j, label in enumerate(self.labels):
predicted_counts[j] = len(N.where(predicted_labels[start:stop]==label)[0])
true_counts[j] = len(N.where(true_labels[start:stop]==label)[0])
evaluation[i] = self.labels[N.argmax(predicted_counts)] == self.labels[N.argmax(true_counts)]
start = stop
return len(N.where(evaluation)[0])/float(len(evaluation)), evaluation
def GaussianPDF(data, m, C):
"""
::
Gaussian PDF lookup for row-wise data
data - n-dimensional observation matrix (or vector)
m - Gaussian mean vector
C - Gaussian covariance matrix
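        example (an illustrative sketch, not part of the original source):
            p = GaussianPDF(N.random.randn(10, 2), N.zeros(2), N.eye(2))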
"""
n = C.shape[0]
d = N.linalg.linalg.det(C)
    const = 1. / ( (2*N.pi)**(n/2.) * N.sqrt(d) ) # assume equal priors; n/2. avoids integer division under Python 2
g = lambda x: const * N.exp ( -0.5 * N.dot(N.dot((x - m), N.linalg.linalg.inv(C)), (x - m).T ) )
p = [ g(x) for x in data]
return N.array(p)
|
bregmanstudio/BregmanToolkit
|
bregman/classifier.py
|
Python
|
mit
| 11,802
|
[
"Gaussian"
] |
b792206a851addd2fcfc1fd881d5bd0a2d1a57f8beb9077b6f4192674c08d03c
|
# Version: 0.19
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/python-versioneer/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3
* [![Latest Version][pypi-image]][pypi-url]
* [![Build Status][travis-image]][travis-url]
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md))
* run `versioneer install` in your source tree, commit the results
* Verify version information with `python setup.py version`
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
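A minimal sketch of reading these flavors from `setup.py` (illustrative only;
it assumes the vendored `versioneer.py` sits next to the `setup.py` that
imports it):
    import versioneer
    info = versioneer.get_versions()
    print(info["version"], info["full-revisionid"], info["dirty"])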
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
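For reference, a typical `[versioneer]` section in `setup.cfg` might look like
the sketch below (paths and prefixes are illustrative, not prescriptive; the
keys are the ones read by `get_config_from_root()` later in this file):
    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = src/myproject/_version.py
    versionfile_build = myproject/_version.py
    tag_prefix = v
    parentdir_prefix = myproject-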
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/python-versioneer/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## Similar projects
* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time
dependency
* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of
versioneer
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg
[pypi-url]: https://pypi.python.org/pypi/versioneer/
[travis-image]:
https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg
[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer
"""
import configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = (
"Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND')."
)
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print(
"Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py)
)
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.ConfigParser()
with open(setup_cfg, "r") as f:
parser.read_file(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY[
"git"
] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.19) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(
r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
if not mo:
mo = re.search(
r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
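# Illustrative trace (same hypothetical pieces as above): {"closest-tag": "1.2",
# "distance": 3, "short": "abc1234", "dirty": True} renders here as
# "1.2-3-gabc1234-dirty".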
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
    Returns a dict with 'version', 'full-revisionid', 'dirty', 'error', and 'date' keys.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert (
cfg.versionfile_source is not None
), "please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass(cmdclass=None):
"""Get the custom setuptools/distutils subclasses used by Versioneer.
If the package uses a different cmdclass (e.g. one from numpy), it
    should be provided as an argument.
"""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
    # sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/python-versioneer/python-versioneer/issues/52
cmds = {} if cmdclass is None else cmdclass.copy()
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "build_py" in cmds:
_build_py = cmds["build_py"]
elif "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "setuptools" in sys.modules:
from setuptools.command.build_ext import build_ext as _build_ext
else:
from distutils.command.build_ext import build_ext as _build_ext
class cmd_build_ext(_build_ext):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_ext.run(self)
if self.inplace:
# build_ext --inplace will only build extensions in
# build/lib<..> dir with no _version.py to write to.
# As in place builds will already have a _version.py
# in the module dir, we do not need to write one.
return
# now locate _version.py in the new build/ directory and replace
# it with an updated value
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_ext"] = cmd_build_ext
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if "py2exe" in sys.modules: # py2exe enabled?
from py2exe.distutils_buildexe import py2exe as _py2exe
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "sdist" in cmds:
_sdist = cmds["sdist"]
elif "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(
target_versionfile, self._versioneer_generated_versions
)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Do main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (
EnvironmentError,
configparser.NoSectionError,
configparser.NoOptionError,
) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(
" appending versionfile_source ('%s') to MANIFEST.in"
% cfg.versionfile_source
)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
pymor/pymor-deal.II
|
versioneer.py
|
Python
|
bsd-2-clause
| 70,238
|
[
"Brian"
] |
eba028cc37a274e262ecc5e8d8cd076c1dfacaac63e8b39d9f8d55a76d3a0ac8
|
#!/usr/local/bin/python
"""
Cantera .cti input file processor
The functions and classes in this module process Cantera .cti input
files and produce CTML files. It can be imported as a module, or used
as a script.
script usage:
python ctml_writer.py infile.cti
This will produce CTML file 'infile.xml'
"""
import string
class CTI_Error:
"""Exception raised if an error is encountered while
parsing the input file."""
def __init__(self, msg):
print '\n\n***** Error parsing input file *****\n\n'
print msg
print
indent = ['',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ']
#-----------------------------------------------------
class XMLnode:
"""This is a minimal class to allow easy creation of an XML tree
from Python. It can write XML, but cannot read it."""
def __init__(self, name="--", value = ""):
"""Create a new node. Usually this only needs to be explicitly
called to create the root element. Method addChild calls this
constructor to create the new child node."""
self._name = name
# convert 'value' to a string if it is not already, and
# strip leading whitespace
if type(value) <> types.StringType:
self._value = string.lstrip(`value`)
else:
self._value = string.lstrip(value)
self._attribs = {} # dictionary of attributes
self._children = [] # list of child nodes
self._childmap = {} # dictionary of child nodes
def name(self):
"""The tag name of the node."""
return self._name
def nChildren(self):
"""Number of child elements."""
return len(self._children)
def addChild(self, name, value=""):
"""Add a child with tag 'name', and set its value if the value
parameter is supplied."""
# create a new node for the child
c = XMLnode(name = name, value = value)
# add it to the list of children, and to the dictionary
# of children
self._children.append(c)
self._childmap[name] = c
return c
def addComment(self, comment):
"""Add a comment."""
self.addChild(name = '_comment_', value = comment)
def value(self):
"""A string containing the element value."""
return self._value
def child(self, name=""):
"""The child node with specified name."""
return self._childmap[name]
def __getitem__(self, key):
"""Get an attribute using the syntax node[key]"""
return self._attribs[key]
def __setitem__(self, key, value):
"""Set a new attribute using the syntax node[key] = value."""
self._attribs[key] = value
def __call__(self):
"""Allows getting the value using the syntax 'node()'"""
return self._value
def write(self, file):
"""Write out the XML tree to a file."""
f = open(file,'w')
f.write('<?xml version="1.0"?>\n')
self._write(f, 0)
f.write('\n')
def _write(self, f, level = 0):
"""Internal method used to write the XML representation of
each node."""
if self._name == "": return
indnt = indent[level]
# handle comments
if self._name == '_comment_':
f.write('\n'+indnt+'<!--')
if len(self._value) > 0:
if self._value[0] <> ' ':
self._value = ' '+self._value
if self._value[-1] <> ' ':
self._value = self._value+' '
f.write(self._value+'-->')
return
# write the opening tag and attributes
f.write(indnt + '<' + self._name)
for a in self._attribs.keys():
f.write(' '+a+'="'+self._attribs[a]+'"')
if (self._value == "" and self.nChildren() == 0):
f.write('/>')
else:
f.write('>')
if self._value <> "":
vv = string.lstrip(self._value)
ieol = vv.find('\n')
if ieol >= 0:
while 1 > 0:
ieol = vv.find('\n')
if ieol >= 0:
f.write('\n '+indnt+vv[:ieol])
vv = string.lstrip(vv[ieol+1:])
else:
f.write('\n '+indnt+vv)
break
else:
f.write(self._value)
for c in self._children:
f.write('\n')
c._write(f, level + 2)
if (self.nChildren() > 0):
f.write('\n'+indnt)
f.write('</'+self._name+'>')
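# Brief illustrative use of XMLnode (element names and the output file are
# examples only, not part of the CTML schema):
#
#   root = XMLnode('ctml')
#   ph = root.addChild('phase')
#   ph['id'] = 'gas'
#   ph.addChild('elementArray', 'H O')
#   root.write('example.xml')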
#--------------------------------------------------
# constants that can be used in .cti files
OneAtm = 1.01325e5
OneBar = 1.0e5
import types, math, copy
# default units
_ulen = 'm'
_umol = 'kmol'
_umass = 'kg'
_utime = 's'
_ue = 'J/kmol'
_uenergy = 'J'
_upres = 'Pa'
# used to convert reaction pre-exponentials
_length = {'cm':0.01, 'm':1.0, 'mm':0.001}
_moles = {'kmol':1.0, 'mol':0.001, 'molec':1.0/6.023e26}
_time = {'s':1.0, 'min':60.0, 'hr':3600.0}
# default std state pressure
_pref = 1.0e5 # 1 bar
_name = 'noname'
# these lists store top-level entries
_elements = []
_species = []
_speciesnames = []
_phases = []
_reactions = []
_atw = {}
_enames = {}
_valsp = ''
_valrxn = ''
_valexport = ''
_valfmt = ''
def export_species(file, fmt = 'CSV'):
global _valexport
global _valfmt
_valexport = file
_valfmt = fmt
def validate(species = 'yes', reactions = 'yes'):
global _valsp
global _valrxn
_valsp = species
_valrxn = reactions
def isnum(a):
"""True if a is an integer or floating-point number."""
if type(a) == types.IntType or type(a) == types.FloatType:
return 1
else:
return 0
def is_local_species(name):
"""true if the species named 'name' is defined in this file"""
if name in _speciesnames:
return 1
return 0
def dataset(nm):
"Set the dataset name. Invoke this to change the name of the xml file."
global _name
_name = nm
def standard_pressure(p0):
"""Set the default standard-state pressure."""
global _pref
_pref = p0
def units(length = '', quantity = '', mass = '', time = '',
act_energy = '', energy = '', pressure = ''):
"""set the default units."""
global _ulen, _umol, _ue, _utime, _umass, _uenergy, _upres
if length: _ulen = length
if quantity: _umol = quantity
if act_energy: _ue = act_energy
if time: _utime = time
if mass: _umass = mass
if energy: _uenergy = energy
if pressure: _upres = pressure
def ufmt(base, n):
"""return a string representing a unit to a power n."""
if n == 0: return ''
if n == 1: return '-'+base
if n == -1: return '/'+base
if n > 0: return '-'+base+`n`
if n < 0: return '/'+base+`-n`
def write():
"""write the CTML file."""
x = XMLnode("ctml")
v = x.addChild("validate")
v["species"] = _valsp
v["reactions"] = _valrxn
if _elements:
ed = x.addChild("elementData")
for e in _elements:
e.build(ed)
for ph in _phases:
ph.build(x)
s = species_set(name = _name, species = _species)
s.build(x)
r = x.addChild('reactionData')
r['id'] = 'reaction_data'
for rx in _reactions:
rx.build(r)
if _name <> 'noname':
x.write(_name+'.xml')
else:
print x
if _valexport:
f = open(_valexport,'w')
for s in _species:
s.export(f, _valfmt)
f.close()
def addFloat(x, nm, val, fmt='', defunits=''):
"""
Add a child element to XML element x representing a
floating-point number.
"""
u = ''
s = ''
if isnum(val):
fval = float(val)
if fmt:
s = fmt % fval
else:
s = `fval`
xc = x.addChild(nm, s)
if defunits:
xc['units'] = defunits
else:
v = val[0]
u = val[1]
if fmt:
s = fmt % v
else:
s = `v`
xc = x.addChild(nm, s)
xc['units'] = u
def getAtomicComp(atoms):
if type(atoms) == types.DictType: return atoms
a = atoms.replace(',',' ')
toks = a.split()
d = {}
for t in toks:
b = t.split(':')
d[b[0]] = int(b[1])
return d
def getReactionSpecies(s):
"""Take a reaction string and return a
dictionary mapping species names to stoichiometric
coefficients. If any species appears more than once,
the returned stoichiometric coefficient is the sum.
>>> s = 'CH3 + 3 H + 5.2 O2 + 0.7 H'
>>> getReactionSpecies(s)
    {'CH3':1.0, 'H':3.7, 'O2':5.2}
"""
# get rid of the '+' signs separating species. Only plus signs
# surrounded by spaces are replaced, so that plus signs may be
# used in species names (e.g. 'Ar3+')
toks = s.replace(' + ',' ').split()
d = {}
n = 1.0
for t in toks:
# try to convert the token to a number.
try:
n = float(t)
if n < 0.0:
raise CTI_Error("negative stoichiometric coefficient:"
+s)
#if t > '0' and t < '9':
# n = int(t)
#else:
# token isn't a number, so it must be a species name
except:
if d.has_key(t): # already seen this token
d[t] += n # so increment its value by the last
# value of n
else:
d[t] = n # first time this token has been seen,
# so set its value to n
n = 1 # reset n to 1.0 for species that do not
# specify a stoichiometric coefficient
return d
class element:
def __init__(self, symbol = '',
atomic_mass = 0.01,
atomic_number = 0):
self._sym = symbol
self._atw = atomic_mass
self._num = atomic_number
global _elements
_elements.append(self)
def build(self, db):
e = db.addChild("element")
e["name"] = self._sym
e["atomicWt"] = `self._atw`
e["atomicNumber"] = `self._num`
class species_set:
def __init__(self, name = '', species = []):
self._s = species
self._name = name
#self.type = SPECIES_SET
def build(self, p):
p.addComment(' species definitions ')
sd = p.addChild("speciesData")
sd["id"] = "species_data"
for s in self._s:
#if s.type == SPECIES:
s.build(sd)
#else:
# raise 'wrong object type in species_set: '+s.__class__
class species:
"""A species."""
def __init__(self,
name = 'missing name!',
atoms = '',
note = '',
thermo = None,
transport = None,
charge = -999,
size = 1.0):
self._name = name
self._atoms = getAtomicComp(atoms)
self._comment = note
if thermo:
self._thermo = thermo
else:
self._thermo = const_cp()
self._transport = transport
chrg = 0
self._charge = charge
if self._atoms.has_key('E'):
chrg = -self._atoms['E']
if self._charge <> -999:
if self._charge <> chrg:
raise CTI_Error('specified charge inconsistent with number of electrons')
else:
self._charge = chrg
self._size = size
global _species
global _enames
_species.append(self)
global _speciesnames
if name in _speciesnames:
raise CTI_Error('species '+name+' multiply defined.')
_speciesnames.append(name)
for e in self._atoms.keys():
_enames[e] = 1
def export(self, f, fmt = 'CSV'):
global _enames
if fmt == 'CSV':
str = self._name+','
for e in _enames:
if self._atoms.has_key(e):
str += `self._atoms[e]`+','
else:
str += '0,'
f.write(str)
if type(self._thermo) == types.InstanceType:
self._thermo.export(f, fmt)
else:
nt = len(self._thermo)
for n in range(nt):
self._thermo[n].export(f, fmt)
f.write('\n')
def build(self, p):
hdr = ' species '+self._name+' '
p.addComment(hdr)
s = p.addChild("species")
s["name"] = self._name
a = ''
for e in self._atoms.keys():
a += e+':'+`self._atoms[e]`+' '
s.addChild("atomArray",a)
if self._comment:
s.addChild("note",self._comment)
if self._charge <> -999:
s.addChild("charge",self._charge)
if self._size <> 1.0:
s.addChild("size",self._size)
if self._thermo:
t = s.addChild("thermo")
if type(self._thermo) == types.InstanceType:
self._thermo.build(t)
else:
nt = len(self._thermo)
for n in range(nt):
self._thermo[n].build(t)
if self._transport:
t = s.addChild("transport")
if type(self._transport) == types.InstanceType:
self._transport.build(t)
else:
nt = len(self._transport)
for n in range(nt):
self._transport[n].build(t)
class thermo:
"""Base class for species standard-state thermodynamic properties."""
def _build(self, p):
return p.addChild("thermo")
def export(self, f, fmt = 'CSV'):
pass
class Mu0_table(thermo):
"""Properties are computed by specifying a table of standard
chemical potentials vs. T."""
def __init__(self, range = (0.0, 0.0),
h298 = 0.0,
mu0 = None,
p0 = -1.0):
self._t = range
self._h298 = h298
self._mu0 = mu0
self._pref = p0
def build(self, t):
n = t.addChild("Mu0")
n['Tmin'] = `self._t[0]`
n['Tmax'] = `self._t[1]`
if self._pref <= 0.0:
n['P0'] = `_pref`
else:
n['P0'] = `self._pref`
energy_units = _uenergy+'/'+_umol
addFloat(n,"H298", self._h298, defunits = energy_units)
n.addChild("numPoints", len(self._mu0))
mustr = ''
tstr = ''
col = 0
for v in self._mu0:
mu0 = v[1]
t = v[0]
tstr += '%17.9E, ' % t
mustr += '%17.9E, ' % mu0
col += 1
if col == 3:
tstr = tstr[:-2]+'\n'
mustr = mustr[:-2]+'\n'
col = 0
u = n.addChild("floatArray", mustr)
u["size"] = "numPoints"
u["name"] = "Mu0Values"
u = n.addChild("floatArray", tstr)
u["size"] = "numPoints"
u["name"] = "Mu0Temperatures"
class NASA(thermo):
"""NASA polynomial parameterization."""
def __init__(self, range = (0.0, 0.0),
coeffs = [], p0 = -1.0):
self._t = range
self._pref = p0
if len(coeffs) <> 7:
raise CTI_Error('NASA coefficient list must have length = 7')
self._coeffs = coeffs
def export(self, f, fmt='CSV'):
if fmt == 'CSV':
str = 'NASA,'+`self._t[0]`+','+`self._t[1]`+','
for i in range(7):
str += '%17.9E, ' % self._coeffs[i]
f.write(str)
def build(self, t):
n = t.addChild("NASA")
n['Tmin'] = `self._t[0]`
#n['Tmid'] = `self._t[1]`
n['Tmax'] = `self._t[1]`
if self._pref <= 0.0:
n['P0'] = `_pref`
else:
n['P0'] = `self._pref`
str = ''
for i in range(4):
str += '%17.9E, ' % self._coeffs[i]
str += '\n'
str += '%17.9E, %17.9E, %17.9E' % (self._coeffs[4],
self._coeffs[5], self._coeffs[6])
#if i > 0 and 3*((i+1)/3) == i: str += '\n'
#str = str[:-2]
u = n.addChild("floatArray", str)
u["size"] = "7"
u["name"] = "coeffs"
class Shomate(thermo):
"""Shomate polynomial parameterization."""
def __init__(self, range = (0.0, 0.0),
coeffs = [], p0 = -1.0):
self._t = range
self._pref = p0
if len(coeffs) <> 7:
raise CTI_Error('Shomate coefficient list must have length = 7')
self._coeffs = coeffs
def build(self, t):
n = t.addChild("Shomate")
n['Tmin'] = `self._t[0]`
n['Tmax'] = `self._t[1]`
if self._pref <= 0.0:
n['P0'] = `_pref`
else:
n['P0'] = `self._pref`
str = ''
for i in range(4):
str += '%17.9E, ' % self._coeffs[i]
str += '\n'
str += '%17.9E, %17.9E, %17.9E' % (self._coeffs[4],
self._coeffs[5], self._coeffs[6])
u = n.addChild("floatArray", str)
u["size"] = "7"
u["name"] = "coeffs"
class const_cp(thermo):
"""Constant specific heat."""
def __init__(self,
t0 = 298.15, cp0 = 0.0, h0 = 0.0, s0 = 0.0,
tmax = 5000.0, tmin = 100.0):
self._t = [tmin, tmax]
self._c = [t0, h0, s0, cp0]
def build(self, t):
#t = self._build(p)
c = t.addChild('const_cp')
if self._t[0] >= 0.0: c['Tmin'] = `self._t[0]`
if self._t[1] >= 0.0: c['Tmax'] = `self._t[1]`
energy_units = _uenergy+'/'+_umol
addFloat(c,'t0',self._c[0], defunits = 'K')
addFloat(c,'h0',self._c[1], defunits = energy_units)
addFloat(c,'s0',self._c[2], defunits = energy_units+'/K')
addFloat(c,'cp0',self._c[3], defunits = energy_units+'/K')
class gas_transport:
"""Transport coefficients for ideal gas transport model."""
def __init__(self, geom = 'nonlin',
diam = 0.0, well_depth = 0.0, dipole = 0.0,
polar = 0.0, rot_relax = 0.0):
self._geom = geom
self._diam = diam
self._well_depth = well_depth
self._dipole = dipole
self._polar = polar
self._rot_relax = rot_relax
def build(self, t):
#t = s.addChild("transport")
t['model'] = 'gas_transport'
# t.addChild("geometry", self._geom)
tg = t.addChild('string',self._geom)
tg['title'] = 'geometry'
addFloat(t, "LJ_welldepth", (self._well_depth, 'K'), '%8.3f')
addFloat(t, "LJ_diameter", (self._diam, 'A'),'%8.3f')
addFloat(t, "dipoleMoment", (self._dipole, 'Debye'),'%8.3f')
addFloat(t, "polarizability", (self._polar, 'A3'),'%8.3f')
addFloat(t, "rotRelax", self._rot_relax,'%8.3f')
class Arrhenius:
def __init__(self,
A = 0.0,
n = 0.0,
E = 0.0,
coverage = [],
rate_type = ''):
self._c = [A, n, E]
self._type = rate_type
if coverage:
if type(coverage[0]) == types.StringType:
self._cov = [coverage]
else:
self._cov = coverage
else:
self._cov = None
def build(self, p, units_factor = 1.0,
gas_species = [], name = '', rxn_phase = None):
a = p.addChild('Arrhenius')
if name: a['name'] = name
# check for sticking probability
if self._type:
a['type'] = self._type
if self._type == 'stick':
ngas = len(gas_species)
if ngas <> 1:
raise CTI_Error("""
Sticking probabilities can only be used for reactions with one gas-phase
reactant, but this reaction has """+`ngas`+': '+`gas_species`)
else:
a['species'] = gas_species[0]
units_factor = 1.0
# if a pure number is entered for A, multiply by the conversion
# factor to SI and write it to CTML as a pure number. Otherwise,
# pass it as-is through to CTML with the unit string.
if isnum(self._c[0]):
addFloat(a,'A',self._c[0]*units_factor, fmt = '%14.6E')
elif len(self._c[0]) == 2 and self._c[0][1] == '/site':
addFloat(a,'A',self._c[0][0]/rxn_phase._sitedens,
fmt = '%14.6E')
else:
addFloat(a,'A',self._c[0], fmt = '%14.6E')
# The b coefficient should be dimensionless, so there is no
# need to use 'addFloat'
a.addChild('b',`self._c[1]`)
# If a pure number is entered for the activation energy,
# add the default units, otherwise use the supplied units.
addFloat(a,'E', self._c[2], fmt = '%f', defunits = _ue)
# for surface reactions, a coverage dependence may be specified.
if self._cov:
for cov in self._cov:
c = a.addChild('coverage')
c['species'] = cov[0]
addFloat(c, 'a', cov[1], fmt = '%f')
c.addChild('m', `cov[2]`)
addFloat(c, 'e', cov[3], fmt = '%f', defunits = _ue)
def stick(A = 0.0, n = 0.0, E = 0.0, coverage = []):
return Arrhenius(A = A, n = n, E = E, coverage = coverage, rate_type = 'stick')
def getPairs(s):
toks = s.split()
m = {}
for t in toks:
key, val = t.split(':')
m[key] = float(val)
return m
class reaction:
def __init__(self,
equation = '',
kf = None,
id = '',
order = '',
options = []
):
self._id = id
self._e = equation
self._order = order
if type(options) == types.StringType:
self._options = [options]
else:
self._options = options
global _reactions
self._num = len(_reactions)+1
r = ''
p = ''
for e in ['<=>', '=>', '=']:
if self._e.find(e) >= 0:
r, p = self._e.split(e)
if e in ['<=>','=']: self.rev = 1
else: self.rev = 0
break
self._r = getReactionSpecies(r)
self._p = getReactionSpecies(p)
self._rxnorder = copy.copy(self._r)
if self._order:
ord = getPairs(self._order)
for o in ord.keys():
if self._rxnorder.has_key(o):
self._rxnorder[o] = ord[o]
else:
raise CTI_Error("order specified for non-reactant: "+o)
self._kf = kf
self._igspecies = []
self._dims = [0]*4
self._rxnphase = None
self._type = ''
_reactions.append(self)
def build(self, p):
if self._id:
id = self._id
else:
if self._num < 10:
nstr = '000'+`self._num`
elif self._num < 100:
nstr = '00'+`self._num`
elif self._num < 1000:
nstr = '0'+`self._num`
else:
nstr = `self._num`
id = nstr
mdim = 0
ldim = 0
str = ''
rxnph = []
for s in self._r.keys():
ns = self._rxnorder[s]
nm = -999
nl = -999
str += s+':'+`self._r[s]`+' '
mindim = 4
for ph in _phases:
if ph.has_species(s):
nm, nl = ph.conc_dim()
if ph.is_ideal_gas():
self._igspecies.append(s)
if not ph in rxnph:
rxnph.append(ph)
self._dims[ph._dim] += 1
if ph._dim < mindim:
self._rxnphase = ph
mindim = ph._dim
break
if nm == -999:
raise CTI_Error("species "+s+" not found")
mdim += nm*ns
ldim += nl*ns
p.addComment(" reaction "+id+" ")
r = p.addChild('reaction')
r['id'] = id
if self.rev:
r['reversible'] = 'yes'
else:
r['reversible'] = 'no'
noptions = len(self._options)
for nss in range(noptions):
s = self._options[nss]
if s == 'duplicate':
r['duplicate'] = 'yes'
elif s == 'negative_A':
r['negative_A'] = 'yes'
ee = self._e.replace('<','[')
ee = ee.replace('>',']')
r.addChild('equation',ee)
if self._order:
for osp in self._rxnorder.keys():
o = r.addChild('order',self._rxnorder[osp])
o['species'] = osp
# adjust the moles and length powers based on the dimensions of
# the rate of progress (moles/length^2 or moles/length^3)
if self._type == 'surface':
mdim += -1
ldim += 2
p = self._dims[:3]
if p[0] <> 0 or p[1] <> 0 or p[2] > 1:
raise CTI_Error(self._e +'\nA surface reaction may contain at most '+
'one surface phase.')
elif self._type == 'edge':
mdim += -1
ldim += 1
p = self._dims[:2]
if p[0] <> 0 or p[1] > 1:
raise CTI_Error(self._e+'\nAn edge reaction may contain at most '+
'one edge phase.')
else:
mdim += -1
ldim += 3
# add the reaction type as an attribute if it has been specified.
if self._type:
r['type'] = self._type
# The default rate coefficient type is Arrhenius. If the rate
# coefficient has been specified as a sequence of three
# numbers, then create a new Arrhenius instance for it;
# otherwise, just use the supplied instance.
nm = ''
kfnode = r.addChild('rateCoeff')
if self._type == '':
self._kf = [self._kf]
elif self._type == 'surface':
self._kf = [self._kf]
elif self._type == 'edge':
self._kf = [self._kf]
elif self._type == 'threeBody':
self._kf = [self._kf]
mdim += 1
ldim -= 3
if self._type == 'edge':
if self._beta > 0:
electro = kfnode.addChild('electrochem')
electro['beta'] = `self._beta`
for kf in self._kf:
unit_fctr = (math.pow(_length[_ulen], -ldim) *
math.pow(_moles[_umol], -mdim) / _time[_utime])
if type(kf) == types.InstanceType:
k = kf
else:
k = Arrhenius(A = kf[0], n = kf[1], E = kf[2])
k.build(kfnode, unit_fctr, gas_species = self._igspecies,
name = nm, rxn_phase = self._rxnphase)
# set values for low-pressure rate coeff if falloff rxn
mdim += 1
ldim -= 3
nm = 'k0'
str = str[:-1]
r.addChild('reactants',str)
str = ''
for s in self._p.keys():
ns = self._p[s]
str += s+':'+`ns`+' '
str = str[:-1]
r.addChild('products',str)
return r
#-------------------
class three_body_reaction(reaction):
def __init__(self,
equation = '',
kf = None,
efficiencies = '',
id = '',
options = []
):
reaction.__init__(self, equation, kf, id, '', options)
self._type = 'threeBody'
self._effm = 1.0
self._eff = efficiencies
# clean up reactant and product lists
for r in self._r.keys():
if r == 'M' or r == 'm':
del self._r[r]
for p in self._p.keys():
if p == 'M' or p == 'm':
del self._p[p]
def build(self, p):
r = reaction.build(self, p)
if r == 0: return
kfnode = r.child('rateCoeff')
if self._eff:
eff = kfnode.addChild('efficiencies',self._eff)
eff['default'] = `self._effm`
#---------------
class falloff_reaction(reaction):
def __init__(self,
equation = '',
kf0 = None,
kf = None,
efficiencies = '',
falloff = None,
id = '',
options = []
):
kf2 = (kf, kf0)
reaction.__init__(self, equation, kf2, id, '', options)
self._type = 'falloff'
# use a Lindemann falloff function by default
self._falloff = falloff
if self._falloff == None:
self._falloff = Lindemann()
self._effm = 1.0
self._eff = efficiencies
# clean up reactant and product lists
del self._r['(+']
del self._p['(+']
if self._r.has_key('M)'):
del self._r['M)']
del self._p['M)']
if self._r.has_key('m)'):
del self._r['m)']
del self._p['m)']
else:
for r in self._r.keys():
if r[-1] == ')' and r.find('(') < 0:
if self._eff:
                        raise CTI_Error('(+ '+r[:-1]+') and '+self._eff+' cannot both be specified')
                    self._eff = r[:-1]+':1.0'
self._effm = 0.0
del self._r[r]
del self._p[r]
def build(self, p):
r = reaction.build(self, p)
if r == 0: return
kfnode = r.child('rateCoeff')
if self._eff and self._effm >= 0.0:
eff = kfnode.addChild('efficiencies',self._eff)
eff['default'] = `self._effm`
if self._falloff:
self._falloff.build(kfnode)
class surface_reaction(reaction):
def __init__(self,
equation = '',
kf = None,
id = '',
order = '',
options = []):
reaction.__init__(self, equation, kf, id, order, options)
self._type = 'surface'
class edge_reaction(reaction):
def __init__(self,
equation = '',
kf = None,
id = '',
order = '',
beta = 0.0,
options = []):
reaction.__init__(self, equation, kf, id, order, options)
self._type = 'edge'
self._beta = beta
#--------------
class state:
def __init__(self,
temperature = None,
pressure = None,
mole_fractions = None,
mass_fractions = None,
density = None,
coverages = None):
self._t = temperature
self._p = pressure
self._rho = density
self._x = mole_fractions
self._y = mass_fractions
self._c = coverages
def build(self, ph):
st = ph.addChild('state')
if self._t: addFloat(st, 'temperature', self._t, defunits = 'K')
if self._p: addFloat(st, 'pressure', self._p, defunits = _upres)
if self._rho: addFloat(st, 'density', self._rho, defunits = _umass+'/'+_ulen+'3')
if self._x: st.addChild('moleFractions', self._x)
if self._y: st.addChild('massFractions', self._y)
if self._c: st.addChild('coverages', self._c)
class phase:
"""Base class for phases of matter."""
def __init__(self,
name = '',
dim = 3,
elements = '',
species = '',
reactions = 'none',
initial_state = None,
options = []):
self._name = name
self._dim = dim
self._el = elements
self._sp = []
self._rx = []
if type(options) == types.StringType:
self._options = [options]
else:
self._options = options
self.debug = 0
if 'debug' in options:
self.debug = 1
#--------------------------------
# process species
#--------------------------------
# if a single string is entered, make it a list
if type(species) == types.StringType:
self._species = [species]
else:
self._species = species
self._skip = 0
# dictionary of species names
self._spmap = {}
# for each species string, check whether or not the species
# are imported or defined locally. If imported, the string
# contains a colon (:)
for sp in self._species:
icolon = sp.find(':')
if icolon > 0:
#datasrc, spnames = sp.split(':')
datasrc = sp[:icolon].strip()
spnames = sp[icolon+1:]
self._sp.append((datasrc+'.xml', spnames))
else:
spnames = sp
self._sp.append(('', spnames))
# strip the commas, and make the list of species names
# 10/31/03: commented out the next line, so that species names may contain commas
#sptoks = spnames.replace(',',' ').split()
sptoks = spnames.split()
for s in sptoks:
# check for stray commas
if s <> ',':
if s[0] == ',': s = s[1:]
if s[-1] == ',': s = s[:-1]
if s <> 'all' and self._spmap.has_key(s):
raise CTI_Error('Multiply-declared species '+s+' in phase '+self._name)
self._spmap[s] = self._dim
self._rxns = reactions
# check that species have been declared
if len(self._spmap) == 0:
raise CTI_Error('No species declared for phase '+self._name)
# and that only one species is declared if it is a pure phase
if self.is_pure() and len(self._spmap) > 1:
raise CTI_Error('Stoichiometric phases must declare exactly one species, \n'+
'but phase '+self._name+' declares '+`len(self._spmap)`+'.')
self._initial = initial_state
# add this phase to the global phase list
global _phases
_phases.append(self)
def is_ideal_gas(self):
"""True if the entry represents an ideal gas."""
return 0
def is_pure(self):
return 0
def has_species(self, s):
"""Return 1 is a species with name 's' belongs to the phase,
or 0 otherwise."""
if self._spmap.has_key(s): return 1
return 0
def conc_dim(self):
"""Concentration dimensions. Used in computing the units for reaction
rate coefficients."""
return (1, -self._dim)
def buildrxns(self, p):
if type(self._rxns) == types.StringType:
self._rxns = [self._rxns]
# for each reaction string, check whether or not the reactions
# are imported or defined locally. If imported, the string
# contains a colon (:)
for r in self._rxns:
icolon = r.find(':')
if icolon > 0:
#datasrc, rnum = r.split(':')
datasrc = r[:icolon].strip()
rnum = r[icolon+1:]
self._rx.append((datasrc+'.xml', rnum))
else:
rnum = r
self._rx.append(('', rnum))
for r in self._rx:
datasrc = r[0]
ra = p.addChild('reactionArray')
ra['datasrc'] = datasrc+'#reaction_data'
if 'skip_undeclared_species' in self._options:
rk = ra.addChild('skip')
rk['species'] = 'undeclared'
rtoks = r[1].split()
if rtoks[0] <> 'all':
i = ra.addChild('include')
#i['prefix'] = 'reaction_'
i['min'] = rtoks[0]
if len(rtoks) > 2 and (rtoks[1] == 'to' or rtoks[1] == '-'):
i['max'] = rtoks[2]
else:
i['max'] = rtoks[0]
def build(self, p):
p.addComment(' phase '+self._name+' ')
ph = p.addChild('phase')
ph['id'] = self._name
ph['dim'] = `self._dim`
# ------- error tests -------
#err = ph.addChild('validation')
#err.addChild('duplicateReactions','halt')
#err.addChild('thermo','warn')
e = ph.addChild('elementArray',self._el)
e['datasrc'] = 'elements.xml'
for s in self._sp:
datasrc, names = s
sa = ph.addChild('speciesArray',names)
sa['datasrc'] = datasrc+'#species_data'
if 'skip_undeclared_elements' in self._options:
sk = sa.addChild('skip')
sk['element'] = 'undeclared'
if self._rxns <> 'none':
self.buildrxns(ph)
#self._eos.build(ph)
if self._initial:
self._initial.build(ph)
return ph
class ideal_gas(phase):
"""An ideal gas mixture."""
def __init__(self,
name = '',
elements = '',
species = '',
reactions = 'none',
kinetics = 'GasKinetics',
transport = 'None',
initial_state = None,
options = []):
phase.__init__(self, name, 3, elements, species, reactions,
initial_state, options)
self._pure = 0
self._kin = kinetics
self._tr = transport
if self.debug:
print 'Read ideal_gas entry '+self._name
try:
print 'in file '+__name__
except:
pass
def build(self, p):
ph = phase.build(self, p)
e = ph.addChild("thermo")
e['model'] = 'IdealGas'
k = ph.addChild("kinetics")
k['model'] = self._kin
t = ph.addChild('transport')
t['model'] = self._tr
def is_ideal_gas(self):
return 1
class stoichiometric_solid(phase):
"""A solid compound or pure element.Stoichiometric solid phases
contain exactly one species, which always has unit activity. The
solid is assumed to have constant density. Therefore the rates of
reactions involving these phases do not contain any concentration
terms for the (one) species in the phase, since the concentration
is always the same. """
def __init__(self,
name = '',
elements = '',
species = '',
density = -1.0,
transport = 'None',
initial_state = None,
options = []):
phase.__init__(self, name, 3, elements, species, 'none',
initial_state, options)
self._dens = density
self._pure = 1
if self._dens < 0.0:
raise CTI_Error('density must be specified.')
self._tr = transport
def conc_dim(self):
"""A stoichiometric solid always has unit activity, so the
generalized concentration is 1 (dimensionless)."""
return (0,0)
def build(self, p):
ph = phase.build(self, p)
e = ph.addChild("thermo")
e['model'] = 'StoichSubstance'
addFloat(e, 'density', self._dens, defunits = _umass+'/'+_ulen+'3')
if self._tr:
t = ph.addChild('transport')
t['model'] = self._tr
k = ph.addChild("kinetics")
k['model'] = 'none'
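# For example, a .cti entry using this class might look like the following
# (species name and density are illustrative only):
#
#   stoichiometric_solid(name = 'graphite',
#                        elements = 'C',
#                        species = 'C(gr)',
#                        density = (2.2, 'g/cm3'))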
class stoichiometric_liquid(stoichiometric_solid):
"""A stoichiometric liquid. Currently, there is no distinction
between stoichiometric liquids and solids."""
def __init__(self,
name = '',
elements = '',
species = '',
density = -1.0,
transport = 'None',
initial_state = None,
options = []):
stoichiometric_solid.__init__(self, name, elements,
species, density, transport,
initial_state, options)
class metal(phase):
"""A metal."""
def __init__(self,
name = '',
elements = '',
species = '',
density = -1.0,
transport = 'None',
initial_state = None,
options = []):
phase.__init__(self, name, 3, elements, species, 'none',
initial_state, options)
self._dens = density
self._pure = 0
self._tr = transport
def conc_dim(self):
return (0,0)
def build(self, p):
ph = phase.build(self, p)
e = ph.addChild("thermo")
e['model'] = 'Metal'
addFloat(e, 'density', self._dens, defunits = _umass+'/'+_ulen+'3')
if self._tr:
t = ph.addChild('transport')
t['model'] = self._tr
k = ph.addChild("kinetics")
k['model'] = 'none'
class incompressible_solid(phase):
"""An incompressible solid."""
def __init__(self,
name = '',
elements = '',
species = '',
density = -1.0,
transport = 'None',
initial_state = None,
options = []):
phase.__init__(self, name, 3, elements, species, 'none',
initial_state, options)
self._dens = density
self._pure = 0
if self._dens < 0.0:
raise CTI_Error('density must be specified.')
self._tr = transport
def conc_dim(self):
return (1,-3)
def build(self, p):
ph = phase.build(self, p)
e = ph.addChild("thermo")
e['model'] = 'Incompressible'
addFloat(e, 'density', self._dens, defunits = _umass+'/'+_ulen+'3')
if self._tr:
t = ph.addChild('transport')
t['model'] = self._tr
k = ph.addChild("kinetics")
k['model'] = 'none'
class lattice(phase):
def __init__(self, name = '',
elements = '',
species = '',
reactions = 'none',
transport = 'None',
initial_state = None,
options = [],
site_density = -1.0,
vacancy_species = ''):
phase.__init__(self, name, 3, elements, species, 'none',
initial_state, options)
self._tr = transport
self._n = site_density
self._vac = vacancy_species
self._species = species
if name == '':
raise CTI_Error('sublattice name must be specified')
if species == '':
raise CTI_Error('sublattice species must be specified')
if site_density < 0.0:
raise CTI_Error('sublattice '+name
+' site density must be specified')
def build(self,p, visible = 0):
#if visible == 0:
# return
ph = phase.build(self, p)
e = ph.addChild('thermo')
e['model'] = 'Lattice'
addFloat(e, 'site_density', self._n, defunits = _umol+'/'+_ulen+'3')
if self._vac:
e.addChild('vacancy_species',self._vac)
if self._tr:
t = ph.addChild('transport')
t['model'] = self._tr
k = ph.addChild("kinetics")
k['model'] = 'none'
class lattice_solid(phase):
"""A solid crystal consisting of one or more sublattices."""
def __init__(self,
name = '',
elements = '',
species = '',
lattices = [],
transport = 'None',
initial_state = None,
options = []):
# find elements
elist = []
for lat in lattices:
e = lat._el.split()
for el in e:
if not el in elist:
elist.append(el)
elements = string.join(elist)
# find species
slist = []
for lat in lattices:
_sp = ""
for spp in lat._species:
_sp = _sp + spp
s = _sp.split()
for sp in s:
if not sp in slist:
slist.append(sp)
species = string.join(slist)
phase.__init__(self, name, 3, elements, species, 'none',
initial_state, options)
self._lattices = lattices
if lattices == []:
raise CTI_Error('One or more sublattices must be specified.')
self._pure = 0
self._tr = transport
def conc_dim(self):
return (0,0)
def build(self, p):
ph = phase.build(self, p)
e = ph.addChild("thermo")
e['model'] = 'LatticeSolid'
if self._lattices:
lat = e.addChild('LatticeArray')
for n in self._lattices:
n.build(lat, visible = 1)
if self._tr:
t = ph.addChild('transport')
t['model'] = self._tr
k = ph.addChild("kinetics")
k['model'] = 'none'
class liquid_vapor(phase):
"""A fluid with a complete liquid/vapor equation of state.
This entry type selects one of a set of predefined fluids with
built-in liquid/vapor equations of state. The substance_flag
parameter selects the fluid. See purefluids.py for the usage
of this entry type."""
def __init__(self,
name = '',
elements = '',
species = '',
substance_flag = 0,
initial_state = None,
options = []):
phase.__init__(self, name, 3, elements, species, 'none',
initial_state, options)
self._subflag = substance_flag
self._pure = 1
def conc_dim(self):
return (0,0)
def build(self, p):
ph = phase.build(self, p)
e = ph.addChild("thermo")
e['model'] = 'PureFluid'
e['fluid_type'] = `self._subflag`
k = ph.addChild("kinetics")
k['model'] = 'none'
class ideal_interface(phase):
"""An ideal interface."""
def __init__(self,
name = '',
elements = '',
species = '',
reactions = 'none',
site_density = 0.0,
phases = [],
kinetics = 'Interface',
transport = 'None',
initial_state = None,
options = []):
self._type = 'surface'
phase.__init__(self, name, 2, elements, species, reactions,
initial_state, options)
self._pure = 0
self._kin = kinetics
self._tr = transport
self._phases = phases
self._sitedens = site_density
def build(self, p):
ph = phase.build(self, p)
e = ph.addChild("thermo")
e['model'] = 'Surface'
addFloat(e, 'site_density', self._sitedens, defunits = _umol+'/'+_ulen+'2')
k = ph.addChild("kinetics")
k['model'] = self._kin
t = ph.addChild('transport')
t['model'] = self._tr
p = ph.addChild('phaseArray',self._phases)
def conc_dim(self):
return (1, -2)
class edge(phase):
"""A 1D boundary between two surface phases."""
def __init__(self,
name = '',
elements = '',
species = '',
reactions = 'none',
site_density = 0.0,
phases = [],
kinetics = 'Edge',
transport = 'None',
initial_state = None,
options = []):
self._type = 'edge'
phase.__init__(self, name, 1, elements, species, reactions,
initial_state, options)
self._pure = 0
self._kin = kinetics
self._tr = transport
self._phases = phases
self._sitedens = site_density
def build(self, p):
ph = phase.build(self, p)
e = ph.addChild("thermo")
e['model'] = 'Edge'
addFloat(e, 'site_density', self._sitedens, defunits = _umol+'/'+_ulen)
k = ph.addChild("kinetics")
k['model'] = self._kin
t = ph.addChild('transport')
t['model'] = self._tr
p = ph.addChild('phaseArray',self._phases)
def conc_dim(self):
return (1, -1)
#-------------------------------------------------------------------
# falloff parameterizations
class Troe:
def __init__(self, A = 0.0, T3 = 0.0, T1 = 0.0, T2 = -999.9):
if T2 <> -999.9:
self._c = (A, T3, T1, T2)
else:
self._c = (A, T3, T1)
def build(self, p):
s = ''
for num in self._c:
s += '%g ' % num
f = p.addChild('falloff', s)
f['type'] = 'Troe'
class SRI:
def __init__(self, A = 0.0, B = 0.0, C = 0.0, D = -999.9, E=-999.9):
if D <> -999.9 and E <> -999.9:
self._c = (A, B, C, D, E)
else:
self._c = (A, B, C)
def build(self, p):
s = ''
for num in self._c:
s += '%g ' % num
f = p.addChild('falloff', s)
f['type'] = 'SRI'
class Lindemann:
def __init__(self):
pass
def build(self, p):
f = p.addChild('falloff')
f['type'] = 'Lindemann'
#get_atomic_wts()
validate()
if __name__ == "__main__":
import sys, os
file = sys.argv[1]
base = os.path.basename(file)
root, ext = os.path.splitext(base)
dataset(root)
execfile(file)
write()
|
blurock/CANTERA
|
RunIgnition/examples/RunIgnition/ctml_writer.py
|
Python
|
gpl-3.0
| 51,733
|
[
"CRYSTAL"
] |
057570af94a1595452c3a15dc09673bda3731c4b779e7b34f4e7018145a35893
|
import numpy as np
import logging
from collections import defaultdict, Counter
from itertools import izip, imap
from operator import itemgetter
from tqdm import tqdm
from cityhash import CityHash32
from .containers import RandomChoiceDict
class NDHashable(object):
"""Turn NumPy NDArray into a hashable object
A more sparse alternative to the default Python hash() method.
"""
def __init__(self, arr):
self.data = arr.tostring()
def __eq__(self, other):
return self.data == other.data
def __hash__(self):
return CityHash32(self.data)
class GaussianLSH(object):
"""Locality-sensitive hashing for nearest neighbor lookups
Designed with updatable indexes in mind. Assumes that the input vectors
are already normalized.
The implementation is based on:
Malcolm Slaney, Yury Lifshits, and Junfeng He "Optimal Parameters for
Locality-Sensitive Hashing" Proceedings of the IEEE, vol.100, no.9,
pp.2604-2623, September 2012
doi: 10.1109/JPROC.2012.2193849
URL: http://dx.doi.org/10.1109/JPROC.2012.2193849
"""
def __init__(self, L, k, rank, w=0.5, normalize_inputs=False,
dtype=np.float32):
"""Default constructor
:param L: number of tables
:param k: bit depth for within-table lookups
:param rank: rank (size) of the input vectors
:param w: quantization window parameter (default: 0.5)
:param normalize_inputs: will bring inputs to L2 norm before indexing
"""
self.w_ = w
self.dtype = dtype
self.gauss_ = self._create_gauss(L, k, rank, dtype=self.dtype)
self.shift_ = self._create_shift(L, k, w, dtype=self.dtype)
self.tables = [defaultdict(set) for _ in xrange(L)]
self.vectors = RandomChoiceDict()
self._second = itemgetter(1)
self._similarity = np.dot
self._normalize = normalize_inputs
@staticmethod
def _create_gauss(L, k, rank, dtype=np.float32):
"""Generate Gaussian projection vectors
:param L: number of tables
:param k: bit depth for within-table lookups
:param rank: rank (size) of the input vectors
"""
vec = np.asarray(np.random.normal(size=(L, k, rank)), dtype=dtype)
norm = np.linalg.norm(vec, axis=-1)
vec /= norm.reshape(L, k, 1)
return vec
@staticmethod
def _create_shift(L, k, w=0.5, dtype=np.float32):
"""Generate shift parameter `b`
:param L: number of tables
:param k: bit depth for within-table lookups
:param w: quantization window parameter (default: 0.5)
"""
return np.asarray(np.random.uniform(size=(L, k)), dtype=dtype) * w
def _iter_hashes(self, vector):
"""iterate over hashes
:param vector: vector (NumPy array)
"""
projection = self.gauss_.dot(vector)
quantized = np.floor((projection + self.shift_) /
self.w_).astype(np.int8)
keys = imap(NDHashable, quantized)
return izip(self.tables, keys)
def insert_rows(self, rows, total=None):
"""Insert iterrows() result from [label, vector] table
:param rows: Pandas dataframe iterrows() result
:param total: expected number of rows
"""
n_skipped = 0
for _, (label, vector) in tqdm(rows, total=total):
if self.insert(label, vector) == 0:
n_skipped += 1
if n_skipped > 0:
logging.warn("%d items skipped", n_skipped)
def insert(self, label, vector, update=False):
"""Insert item
:param label: label belonging to the inserted item
:param vector: vector belonging to the inserted item
:param update: if true, remove indices belonging to existing
item if one exists and shares the same label
"""
if update:
self.remove(label, indices_only=True)
vector = np.asarray(vector, self.dtype)
if self._normalize:
vector = vector / np.linalg.norm(vector)
if np.isnan(vector).any():
return 0
self.vectors[label] = vector
for table, key in self._iter_hashes(vector):
table[key].add(label)
return 1
def upsert(self, label, vector):
"""Update vector (inserts new item if not found)
:param label: label of the item to update
:param vector: vector to update the label to
"""
self.insert(label, vector, update=True)
def remove(self, label, indices_only=False):
"""Remove item and associated indices
:param label: label of the item to remove
:param indices_only: if True, will not erase value (vector)
"""
vectors = self.vectors
if label in vectors:
old_vector = vectors[label]
for table, key in self._iter_hashes(old_vector):
if key in table:
labels = table[key]
labels.remove(label)
if len(labels) == 0:
del table[key]
if not indices_only:
del vectors[label]
def _candidates(self, vector, search_k=None, drop_labels=()):
"""Generate candidates for further analysis
:param vector: vector to lookup (NumPy array of floats)
:param search_k: maximum number of candidates to generate
:param drop_labels: list of labels to discard from query
"""
c = Counter()
vectors = self.vectors
for table, key in self._iter_hashes(vector):
if key in table:
c.update(table[key])
for label in drop_labels:
if label in c:
del c[label]
return [label for label, _ in c.most_common(search_k)
if label in vectors]
def query_vector(self, vector, limit=None, drop_labels=(),
ensure_limit=False, search_k=None, _allow_norm=True):
"""Query a vector
:param vector: vector to lookup (NumPy array of floats)
:param limit: number of neighbors to evaluate
:param drop_labels: list of labels to discard from query
:param ensure_limit: If True, randomly sample neighbors until limit
:param search_k: how many candidates to search
"""
if search_k is None and limit is not None:
search_k = limit * len(self.tables)
if _allow_norm and self._normalize:
vector = np.asarray(vector, self.dtype)
vector = vector / np.linalg.norm(vector)
if np.isnan(vector).any():
logging.warn("NaNs present in input")
return []
labels = self._candidates(
vector, search_k, drop_labels=drop_labels)
vectors = self.vectors
if ensure_limit:
n = limit - len(labels)
for label in vectors.sample_keys(n, replace=False):
if label not in drop_labels:
labels.append(label)
cand_vectors = np.array([vectors[label] for label in labels])
similarities = self._similarity(cand_vectors, vector)
return sorted(izip(labels, similarities),
key=self._second, reverse=True)[:limit]
def query_label(self, label, limit=None, drop_self=False,
ensure_limit=False, search_k=None):
"""Query a label for an item already in the index
:param label: label to lookup
:param limit: number of neighbors to return
:param drop_self: whether to drop query label from results
:param ensure_limit: if True, randomly sample neighbors until limit
:param search_k: how many candidates to search
"""
vector = self.vectors[label]
drop_labels = [label] if drop_self else []
return self.query_vector(
vector, limit=limit, drop_labels=drop_labels,
ensure_limit=ensure_limit, search_k=search_k, _allow_norm=False)
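# --- Added usage sketch (not part of the original module). The class defined
# above is referred to here as `GaussLSH`, a placeholder for whatever name the
# file actually gives it; the constructor and query signatures follow the
# methods shown above.
#
#     index = GaussLSH(L=8, k=12, rank=64, normalize_inputs=True)
#     rng = np.random.RandomState(0)
#     for i in xrange(1000):
#         index.insert('item_%d' % i, rng.normal(size=64))
#     for label, score in index.query_vector(rng.normal(size=64), limit=5):
#         print label, score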
|
escherba/annx
|
annx/pyann/gauss.py
|
Python
|
mit
| 8,078
|
[
"Gaussian"
] |
f26e4156b6d3638b25793e0785ab237ed57e1acb10c35c3384af64d71cc949ca
|
"""
Single Bubble Model: Droplet simulations
=========================================
Use the ``TAMOC`` `single_bubble_model` to simulate the trajectory of a light
oil droplet rising through the water column. This script demonstrates the
typical steps involved in running the single bubble model.
It uses the ambient data stored in the file `../test/output/test_bm54.nc`,
created by the `test_ambient` module. Please make sure all tests have
passed before running this script or modify the script to use a different
source of ambient data.
"""
# S. Socolofsky, July 2013, Texas A&M University <socolofs@tamu.edu>.
from __future__ import (absolute_import, division, print_function)
from tamoc import ambient
from tamoc import dbm
from tamoc import seawater
from tamoc import single_bubble_model
import numpy as np
if __name__ == '__main__':
# Open an ambient profile object from the netCDF dataset
nc = '../../test/output/test_bm54.nc'
bm54 = ambient.Profile(nc, chem_names='all')
bm54.close_nc()
# Initialize a single_bubble_model.Model object with this data
sbm = single_bubble_model.Model(bm54)
# Create a light oil droplet particle to track
composition = ['benzene', 'toluene', 'ethylbenzene']
drop = dbm.FluidParticle(composition, fp_type=1.)
# Set the mole fractions of each component at release.
mol_frac = np.array([0.4, 0.3, 0.3])
# Specify the remaining particle initial conditions
de = 0.0005
z0 = 1000.
T0 = 273.15 + 30.
# Simulate the trajectory through the water column and plot the results
sbm.simulate(drop, z0, de, mol_frac, T0, K=0., K_T=1., fdis=1.e-8,
lag_time=False, delta_t=10.)
sbm.post_process()
# Save the simulation to a netCDF file
sbm.save_sim('./drop_biodeg.nc', '../../test/output/test_bm54.nc',
'Results of ./drop_biodeg.py script')
# Save the data for importing into Matlab
sbm.save_txt('./drop_biodeg.txt', '../../test/output/test_bm54.nc',
'Results of ./drop_biodeg.py script')
|
socolofs/tamoc
|
bin/sbm/drop_biodeg.py
|
Python
|
mit
| 2,096
|
[
"NetCDF"
] |
4b3da33dc60f7baa8884c08dfb67db9aaafdb4f6355f29aef709694c2ba94956
|
from collections import defaultdict
import toolshed as ts
import numpy as np
import pandas as pd
from .crystal import Feature, CountFeature
import os
def example_random_cluster(n_samples=20, n_sites=4, seed=42):
np.random.seed(seed)
if n_samples % 2 != 0: n_samples += 1
covs = pd.DataFrame({'gender': ['F'] * (n_samples / 2) + ['M'] * (n_samples / 2),
'age': np.random.uniform(10, 25, size=n_samples) })
methylation = np.random.normal(0.15, 0.75, size=(n_sites, n_samples))
cluster = [Feature('chr1', (i + 1) * 10, m) for i, m in enumerate(methylation)]
covs['id'] = ['id_%i' %i for i in range(len(covs))]
return covs, cluster
def real_cluster():
path = os.path.join(os.path.dirname(__file__), "tests")
meth = pd.read_csv('%s/real_cluster.csv' % path, index_col=0)
chroms = [x.split(":")[0] for x in meth.index]
starts = [int(x.split(":")[1]) for x in meth.index]
cluster = [Feature(chroms[i], starts[i], np.array(meth.ix[i, :])) for i in
range(len(meth))]
covs = pd.read_csv('%s/covs.csv' % path)
return covs, cluster
def real_count_cluster():
path = os.path.join(os.path.dirname(__file__), "tests")
c = pd.read_csv('%s/m.counts.csv' % path, index_col=0)
m = pd.read_csv('%s/m.methylated.csv' % path, index_col=0)
chroms = [x.split(":")[0] for x in m.index]
starts = [int(x.split(":")[1]) for x in c.index]
cluster = [CountFeature(chroms[i], starts[i],
np.array(m.ix[i, :]),
np.array(c.ix[i, :]))
for i in range(len(m))]
covs = pd.read_table('%s/m.covs.txt' % path)
return covs, cluster
def write_cluster(cluster, fh, float_format="%.4f", count_fh=None):
"""
Write a cluster to file.
Parameters
----------
cluster : cluster
a cluster from aclust (or just a list of features)
fh : filehandle
count_fh : filehandle
if cluster is of `CountFeature` then a count_fh must be
specified so that the counts can be written to file as
well.
"""
fmt = "{chrom}:{position}\t{values}\n"
if isinstance(cluster[0], Feature):
for f in cluster:
values = "\t".join((float_format % v for v in f.values))
fh.write(fmt.format(chrom=f.chrom, position=f.position, values=values))
elif isinstance(cluster[0], CountFeature):
assert count_fh is not None
for f in cluster:
fh.write(fmt.format(chrom=f.chrom, position=f.position,
values="\t".join(f.methylated)))
count_fh.write(fmt.format(chrom=f.chrom, position=f.position,
values="\t".join(f.counts)))
def roc_out(p_bed, p_col, truth_region_bed, exclude=('-1', 'NA', 'nan')):
"""Create ROC for a bed file of p-values given known truth regions.
Parameters
----------
p_bed : file
p_col : int
column containing the p-value from `p_bed`
truth_region_bed : file
contains the true regions
"""
p_col -= 1 # 0-based
regions = defaultdict(list)
for toks in ts.reader(truth_region_bed, header=False):
if not (toks[1] + toks[2]).isdigit(): continue
regions[toks[0]].append((int(toks[1]), int(toks[2])))
truths = []
vals = []
for toks in ts.reader(p_bed, header=False):
if not (toks[1] + toks[2]).isdigit(): continue
reg = regions[toks[0]]
s, e = int(toks[1]), int(toks[2])
p = toks[p_col]
if p in exclude: continue
vals.append(1.0 - float(p))
truth = any(rs <= s <= re or rs <= e <= re for rs, re in reg)
truths.append(truth)
return np.array(truths).astype(int), np.array(vals)
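# --- Added usage sketch (not part of the original module); assumes the crystal
# package is importable so the relative import at the top of this file resolves:
#
#     import sys
#     from crystal.utils import example_random_cluster, write_cluster
#     covs, cluster = example_random_cluster(n_samples=10, n_sites=3)
#     write_cluster(cluster, sys.stdout)  # emits chrom:pos<TAB>values lines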
|
kidaa/crystal-1
|
crystal/utils.py
|
Python
|
mit
| 3,815
|
[
"CRYSTAL"
] |
870ed8eceafc92f6c1bcc76abb461d8256ce7450d65a10913344cc2e187ccd0d
|
import numpy as np
def get_key_to_indexes_dic(labels):
"""
Builds a dictionary whose keys are the labels and whose
items are all the indexes that have that particular key.
"""
# Get the unique labels and initialize the dictionary
label_set = set(labels)
key_to_indexes = {}
for label in label_set:
key_to_indexes[label] = np.where(labels == label)[0]
return key_to_indexes
def sample_blobs(X, y, sampling_list):
"""
Sample the data from X in a way that you extract the
percentages from each cluster in the proportions
passed in sampling_list.
"""
sampling_indexes = []
label_to_index = get_key_to_indexes_dic(y)
for (label, indexes), coefficient in zip(label_to_index.items(), sampling_list):
aux = int(len(indexes) * coefficient)
sampling_indexes.append(indexes[:aux])
sampling_indexes = np.concatenate(sampling_indexes)
X_sampled = X[sampling_indexes, :]
y_sampled = y[sampling_indexes]
return X_sampled, y_sampled
def calculate_distortion(neurons, data, labels):
"""
This functions returns the mean distortion
calculate by taking the distance between the
each point and the neuron that is labeld with
"""
    point_distortion = np.zeros(len(labels))  # float array; labels may be ints
for index, x in enumerate(data):
distance = np.linalg.norm(x - neurons[labels[index]])
point_distortion[index] = distance
return np.mean(point_distortion)
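# --- Added sketch (not part of the original file): a minimal, self-contained
# example exercising the helpers above on synthetic data; all numbers are
# arbitrary.
if __name__ == '__main__':
    np.random.seed(0)
    n_points, n_features, n_clusters = 30, 2, 3
    data = np.random.normal(size=(n_points, n_features))
    labels = np.random.randint(0, n_clusters, size=n_points)
    # Use the per-cluster means as stand-in "neurons" (prototypes).
    neurons = np.array([data[labels == c].mean(axis=0) for c in range(n_clusters)])
    print(get_key_to_indexes_dic(labels))
    print(calculate_distortion(neurons, data, labels))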
|
h-mayorquin/competitive_and_selective_learning
|
functions.py
|
Python
|
mit
| 1,475
|
[
"NEURON"
] |
4d7871e222d9be177d7dfa9912678ed603225def7215dcb04cdc8314394f1d35
|
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os
import numpy as np
from pyemma.coordinates.data import MDFeaturizer
from logging import getLogger
import pyemma.coordinates.api as api
import pyemma.util.types as types
import pkg_resources
from pyemma.util.files import TemporaryDirectory
logger = getLogger('pyemma.'+'TestReaderUtils')
def convert_traj(file, format, top, dir=None):
import subprocess
from mdtraj.scripts import mdconvert
if dir is not None:
outname = os.path.basename(file)
else:
outname = file
out = '{dir}{name}.{format}'.format(format=format, name=outname, dir=dir if dir is not None else '')
import sys
subprocess.check_call([sys.executable, '-m', 'mdtraj.scripts.mdconvert', file, '-o', out, '-t', top])
#subprocess.check_call(['env'], env=os.environ)
return out
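def convert_traj_inprocess(file, format, top, dir=None):
    """Added sketch (not part of the original test module): the same conversion
    done in-process via mdtraj's load/save instead of shelling out to
    mdconvert. Kept separate so the helper above stays untouched."""
    import mdtraj as md
    outname = os.path.basename(file) if dir is not None else file
    out = '{dir}{name}.{format}'.format(format=format, name=outname,
                                        dir=dir if dir is not None else '')
    md.load(file, top=top).save(out)
    return out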
class TestSource(unittest.TestCase):
def setUp(self):
path = pkg_resources.resource_filename('pyemma.coordinates.tests', 'data') + os.path.sep
self.pdb_file = os.path.join(path, 'bpti_ca.pdb')
self.traj_files = [
os.path.join(path, 'bpti_001-033.xtc'),
os.path.join(path, 'bpti_067-100.xtc')
]
def test_read_multiple_files_topology_file(self):
reader = api.source(self.traj_files, top=self.pdb_file)
self.assertIsNotNone(reader, "The reader should not be none.")
self.assertEqual(reader.topfile, self.pdb_file, "Reader topology file and input topology file should coincide.")
self.assertListEqual(reader.filenames, self.traj_files, "Reader trajectories and input"
" trajectories should coincide.")
self.assertEqual(reader.featurizer.topologyfile, self.pdb_file, "Featurizers topology file and input "
"topology file should coincide.")
def test_read_multiple_files_featurizer(self):
featurizer = MDFeaturizer(self.pdb_file)
reader = api.source(self.traj_files, features=featurizer)
self.assertIsNotNone(reader, "The reader should not be none.")
self.assertEqual(reader.topfile, self.pdb_file, "Reader topology file and input topology file should coincide.")
self.assertListEqual(reader.filenames, self.traj_files, "Reader trajectories and input"
" trajectories should coincide.")
self.assertEqual(reader.featurizer.topologyfile, self.pdb_file, "Featurizers topology file and input "
"topology file should coincide.")
    def test_read_single_file_topology_file(self):
reader = api.source(self.traj_files[0], top=self.pdb_file)
self.assertIsNotNone(reader, "The reader should not be none.")
self.assertEqual(reader.topfile, self.pdb_file, "Reader topology file and input topology file should coincide.")
self.assertListEqual(reader.filenames, [self.traj_files[0]], "Reader trajectories and input"
" trajectories should coincide.")
self.assertEqual(reader.featurizer.topologyfile, self.pdb_file, "Featurizers topology file and input "
"topology file should coincide.")
def test_read_single_file_featurizer(self):
featurizer = MDFeaturizer(self.pdb_file)
reader = api.source(self.traj_files[0], features=featurizer)
self.assertIsNotNone(reader, "The reader should not be none.")
self.assertEqual(reader.topfile, self.pdb_file, "Reader topology file and input topology file should coincide.")
self.assertListEqual(reader.filenames, [self.traj_files[0]], "Reader trajectories and input"
" trajectories should coincide.")
self.assertEqual(reader.featurizer.topologyfile, self.pdb_file, "Featurizers topology file and input "
"topology file should coincide.")
def test_invalid_input(self):
# neither featurizer nor topology file given
self.assertRaises(ValueError, api.source, self.traj_files, None, None)
# no input files but a topology file
self.assertRaises(ValueError, api.source, None, None, self.pdb_file)
featurizer = MDFeaturizer(self.pdb_file)
# no input files but a featurizer
self.assertRaises(ValueError, api.source, None, featurizer, None)
# empty list of input files
self.assertRaises(ValueError, api.source, [], None, self.pdb_file)
# empty tuple of input files
self.assertRaises(ValueError, api.source, (), None, self.pdb_file)
def test_invalid_files(self):
# files do not have the same extension
self.assertRaises(ValueError, api.source, self.traj_files.append(self.pdb_file), None, self.pdb_file)
# files list contains something else than strings
self.assertRaises(ValueError, api.source, self.traj_files.append([2]), None, self.pdb_file)
# input file is directory
root_dir = os.path.abspath(os.sep)
self.assertRaises(ValueError, api.source, root_dir, None, self.pdb_file)
def test_h5_mdtraj_vs_plain(self):
with TemporaryDirectory() as td:
f = convert_traj(self.traj_files[0], format='h5', dir=td, top=self.pdb_file)
r = api.source(f, top=self.pdb_file)
from pyemma.coordinates.data import FeatureReader
self.assertIsInstance(r, FeatureReader)
import h5py
from pyemma.coordinates.data.h5_reader import H5Reader
plain_h5_file = os.path.join(td, 'f.h5')
with h5py.File(plain_h5_file) as fh:
fh.create_dataset('test', data=np.random.random((100, 3)))
r = api.source(plain_h5_file)
self.assertIsInstance(r, H5Reader)
class TestSourceCallAll(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = pkg_resources.resource_filename(__name__, 'data') + os.path.sep
cls.pdb_file = os.path.join(path, 'bpti_ca.pdb')
cls.xtc_file = os.path.join(path, 'bpti_mini.xtc')
cls.inp = api.source(cls.xtc_file, top=cls.pdb_file)
def setUp(self):
pass
def test_chunksize(self):
assert types.is_int(self.inp.chunksize)
def test_describe(self):
desc = self.inp.describe()
assert types.is_string(desc) or types.is_list_of_string(desc)
def test_dimension(self):
assert types.is_int(self.inp.dimension())
def test_featurizer(self):
# must have a featurizer
assert self.inp.featurizer is not None
def test_get_output(self):
O = self.inp.get_output()
assert types.is_list(O)
assert len(O) == 1
assert types.is_float_matrix(O[0])
assert O[0].shape[0] == 100
assert O[0].shape[1] == self.inp.dimension()
def test_in_memory(self):
assert isinstance(self.inp.in_memory, bool)
def test_iterator(self):
self.inp.chunksize = 100
assert self.inp.chunksize == 100
for itraj, chunk in self.inp:
assert types.is_int(itraj)
assert types.is_float_matrix(chunk)
assert chunk.shape[0] == self.inp.chunksize
assert chunk.shape[1] == self.inp.dimension()
    def test_n_frames_total(self):
        assert self.inp.n_frames_total() == 100
    def test_number_of_trajectories(self):
        assert self.inp.number_of_trajectories() == 1
def test_output_type(self):
assert self.inp.output_type() == np.float32()
def test_topfile(self):
        assert types.is_string(self.inp.topfile)
def test_trajectory_length(self):
assert self.inp.trajectory_length(0) == 100
with self.assertRaises(IndexError):
self.inp.trajectory_length(1)
def test_trajectory_lengths(self):
assert len(self.inp.trajectory_lengths()) == 1
assert self.inp.trajectory_lengths()[0] == self.inp.trajectory_length(0)
def test_trajfiles(self):
assert types.is_list_of_string(self.inp.filenames)
if __name__ == "__main__":
unittest.main()
|
fabian-paul/PyEMMA
|
pyemma/coordinates/tests/test_source.py
|
Python
|
lgpl-3.0
| 9,194
|
[
"MDTraj"
] |
51fc7181aa1e70b3212991bd7be98115b3c6890d7fbb20cfadf61327bec1371d
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
***********************************************
**espresso.interaction.AngularUniquePotential**
***********************************************
"""
# -*- coding: iso-8859-1 -*-
from espresso import pmi
from espresso import toReal3DFromVector
from _espresso import interaction_AngularUniquePotential
# Python base class for angular potentials
class AngularUniquePotentialLocal(object):
def computeEnergy(self, *args):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if len(args) == 1:
arg0 = args[0]
if isinstance(arg0, float) or isinstance(arg0, int):
return self.cxxclass.computeEnergy(self, arg0)
return self.cxxclass.computeEnergy(self, toReal3DFromVector(*args))
def computeForce(self, *args):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if len(args) == 1: # in case theta is passed
arg0 = args[0]
if isinstance(arg0, float) or isinstance(arg0, int):
return self.cxxclass.computeForce(self, arg0)
return self.cxxclass.computeForce(self, toReal3DFromVector(*args))
if pmi.isController:
class AngularUniquePotential(object):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
localcall = [ 'computeForce', 'computeEnergy' ],
pmiproperty = [ 'cutoff' ]
)
|
BackupTheBerlios/espressopp
|
src/interaction/AngularUniquePotential.py
|
Python
|
gpl-3.0
| 2,263
|
[
"ESPResSo"
] |
8061622265049254eba919188619a1088802fecff8305a19cb8ed48b8545f302
|
# -*- coding: utf-8 -*-
import os
import datetime
import subprocess
from flask import Blueprint, jsonify, current_app
from flask.ext.misaka import markdown
from werkzeug.contrib.atom import AtomFeed
import codecs
import shutil
import math
from mako.template import Template
from mako.lookup import TemplateLookup
from summer.model.entry import Entry
bp = Blueprint('build', __name__)
BASE_DIR = './ghpages'
PAGE_DIR = BASE_DIR + '/page'
POSTS_DIR = BASE_DIR + '/posts'
TEMPLATE_DIR = './fe/template'
STATIC_DIR = BASE_DIR + '/static'
def build_index():
lookup = TemplateLookup(directories=[TEMPLATE_DIR])
template = Template(
filename=TEMPLATE_DIR + '/index.html', lookup=lookup)
page = 1
perpage = 5
entries = Entry.get_published_page(page)
total = len(Entry.get_all_published())
html_content = template.render(
entries=entries, total=total, page=page, perpage=perpage)
dist = os.path.join(BASE_DIR, 'index.html')
with codecs.open(dist, 'w', 'utf-8-sig') as f:
f.write(html_content)
def build_pages():
lookup = TemplateLookup(directories=[TEMPLATE_DIR])
template = Template(
filename=TEMPLATE_DIR + '/index.html', lookup=lookup)
all_entries = Entry.get_all_published(True)
length = len(all_entries)
for page in range(1, int(math.ceil(length / float(5))) + 1):
start = (page - 1) * 5
end = start + 5
entries = all_entries[start:end]
html_content = template.render(
entries=entries, total=length, page=page, perpage=5)
page_path = os.path.join(PAGE_DIR, str(page))
try:
os.mkdir(page_path)
except OSError:
pass
dist = os.path.join(page_path, 'index.html')
with codecs.open(dist, 'w', 'utf-8-sig') as f:
f.write(html_content)
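# --- Added sketch (not from the original file): the pagination arithmetic used
# by build_pages() above, shown in isolation. `length` is the number of
# published entries and 5 matches the per-page size hard-coded above.
def _page_slices(length, perpage=5):
    n_pages = int(math.ceil(length / float(perpage)))
    return [((p - 1) * perpage, (p - 1) * perpage + perpage)
            for p in range(1, n_pages + 1)]
# e.g. _page_slices(12) -> [(0, 5), (5, 10), (10, 15)]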
def build_posts():
lookup = TemplateLookup(directories=[TEMPLATE_DIR])
template = Template(
filename=TEMPLATE_DIR + '/entry.html', lookup=lookup)
entries = Entry.get_all_published()
for _entry in entries:
post_title = _entry['title']
post_content = markdown(_entry['content'])
post_slug = _entry['slug']
date = _entry['date']
status = _entry['status']
entry = dict(
title=post_title,
content=post_content,
date=date,
id=_entry['slug'],
status=status
)
html_content = template.render(entry=entry)
os.mkdir(os.path.join(POSTS_DIR, post_slug))
dist = os.path.join(POSTS_DIR, post_slug + '/index.html')
with codecs.open(dist, 'w', 'utf-8-sig') as f:
f.write(html_content)
# TODO
def build_archive():
pass
# TODO
def build_tag():
# select * from entries where tag like '%mindfire%'
pass
def build_feed():
feed = AtomFeed(current_app.config['SITE_NAME'],
feed_url=current_app.config['DOMAIN'] + 'rss.xml',
url=current_app.config['DOMAIN'],
subtitle=current_app.config['SUBTITLE'],
author=current_app.config['AUTHOR'],
updated=datetime.datetime.now())
entries = Entry.get_all_published()
for _entry in entries:
time = datetime.datetime.strptime(_entry['date'], '%Y-%m-%d %H:%M:%S')
feed.add(unicode(_entry['title']),
unicode(markdown(_entry['content'])),
content_type='html',
author=current_app.config['AUTHOR'],
published=time,
updated=time,
id=current_app.config['DOMAIN'] + _entry['slug'] + '/',
url=current_app.config['DOMAIN'] + 'posts/' + _entry['slug'] + '/'
)
with codecs.open(BASE_DIR + '/rss.xml', 'w', 'utf-8-sig') as f:
f.write(feed.to_string())
@bp.route('/build', methods=['POST'])
def build():
if os.path.exists(PAGE_DIR):
shutil.rmtree(PAGE_DIR)
os.mkdir(PAGE_DIR)
if os.path.exists(POSTS_DIR):
shutil.rmtree(POSTS_DIR)
os.mkdir(POSTS_DIR)
# copy source files
# shutil.copytree('./fe/source', './ghpages/')
# index
build_index()
# page
build_pages()
# post
build_posts()
# TODO
# archive
# feed
build_feed()
# TODO
# site map
if os.path.exists(STATIC_DIR):
shutil.rmtree(STATIC_DIR)
subprocess.call(['gulp', 'release'])
return jsonify(r=True)
|
gaowhen/summer
|
summer/view/build/build.py
|
Python
|
mit
| 4,518
|
[
"GULP"
] |
3c19cba3d98af3e957d2e7224de8c7720b7c4afe8f81a05a8934b9a5eaf514df
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import setter
from .itemlist import ItemList
from .keyword import Keyword, Keywords
from .modelobject import ModelObject
from .tags import Tags
class TestCase(ModelObject):
"""Base model for single test case."""
__slots__ = ['parent', 'name', 'doc', 'timeout']
keyword_class = Keyword
def __init__(self, name='', doc='', tags=None, timeout=None):
#: :class:`~.model.testsuite.TestSuite` that contains this test.
self.parent = None
#: Test case name.
self.name = name
#: Test case documentation.
self.doc = doc
#: Test case tags, a list of strings.
self.tags = tags
#: Test case timeout.
self.timeout = timeout
        #: Test case keywords, a list of :class:`~.model.keyword.Keyword`
        #: instances, including possible setup and teardown keywords.
self.keywords = None
@setter
def tags(self, tags):
return Tags(tags)
@setter
def keywords(self, keywords):
return Keywords(self.keyword_class, self, keywords)
@property
def id(self):
if not self.parent:
return 't1'
return '%s-t%d' % (self.parent.id, self.parent.tests.index(self)+1)
@property
def longname(self):
if not self.parent:
return self.name
return '%s.%s' % (self.parent.longname, self.name)
def visit(self, visitor):
visitor.visit_test(self)
class TestCases(ItemList):
__slots__ = []
def __init__(self, test_class=TestCase, parent=None, tests=None):
ItemList.__init__(self, test_class, {'parent': parent}, tests)
def _check_type_and_set_attrs(self, test):
ItemList._check_type_and_set_attrs(self, test)
for visitor in test.parent._visitors:
test.visit(visitor)
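# --- Added sketch (not part of the original module): how the id/longname
# properties behave for a test case without a parent suite. Assumes the class
# is imported from the package, e.g.:
#
#     from robot.model.testcase import TestCase
#     tc = TestCase(name='Example', doc='Added example', tags=['smoke'])
#     tc.id        # 't1'  (no parent suite)
#     tc.longname  # 'Example'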
|
wojciechtanski/robotframework
|
src/robot/model/testcase.py
|
Python
|
apache-2.0
| 2,435
|
[
"VisIt"
] |
af2531e3f61f27e5bbee646227140ac6dee883345a484e1ad9139e0683f3a3f2
|
from electrum.i18n import _
fullname = _('Two Factor Authentication')
description = ''.join([
_("This plugin adds two-factor authentication to your wallet."), '<br/>',
_("For more information, visit"),
" <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
])
requires_wallet_type = ['2fa']
registers_wallet_type = ('twofactor', '2fa', _("Wallet with two-factor authentication"))
available_for = ['qt']
|
valesi/electrum
|
plugins/trustedcoin/__init__.py
|
Python
|
gpl-3.0
| 468
|
[
"VisIt"
] |
e47f410e73976cffb4cb304e0e3eef512434a71e1bcf629134ac65866f738cd6
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
import unittest
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import fci
import pyscf.symm
from pyscf import mcscf
mol = gto.Mole()
mol.verbose = 0
mol.atom = '''
O 0. 0. 0.
H 0. -0.757 0.587
H 0. 0.757 0.587'''
mol.basis = 'sto-3g'
mol.symmetry = 1
mol.build()
m = scf.RHF(mol)
m.conv_tol = 1e-15
ehf = m.scf()
norb = m.mo_coeff.shape[1]
nelec = mol.nelectron
h1e = reduce(numpy.dot, (m.mo_coeff.T, scf.hf.get_hcore(mol), m.mo_coeff))
g2e = ao2mo.incore.full(m._eri, m.mo_coeff)
orbsym = pyscf.symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb, m.mo_coeff)
cis = fci.direct_spin1_symm.FCISolver(mol)
cis.orbsym = orbsym
numpy.random.seed(15)
na = fci.cistring.num_strings(norb, nelec//2)
ci0 = numpy.random.random((na,na))
def tearDownModule():
global mol, m, h1e, g2e, ci0, cis
del mol, m, h1e, g2e, ci0, cis
class KnownValues(unittest.TestCase):
def test_contract(self):
ci1 = fci.addons.symmetrize_wfn(ci0, norb, nelec, orbsym, wfnsym=0)
ci1 = cis.contract_2e(g2e, ci1, norb, nelec, wfnsym=0)
self.assertAlmostEqual(numpy.linalg.norm(ci1), 83.016780379400785, 9)
ci1 = fci.addons.symmetrize_wfn(ci0, norb, nelec, orbsym, wfnsym=1)
ci1 = cis.contract_2e(g2e, ci1, norb, nelec, wfnsym=1)
self.assertAlmostEqual(numpy.linalg.norm(ci1), 82.295069645213317, 9)
ci1 = fci.addons.symmetrize_wfn(ci0, norb, nelec, orbsym, wfnsym=2)
ci1 = cis.contract_2e(g2e, ci1, norb, nelec, wfnsym=2)
self.assertAlmostEqual(numpy.linalg.norm(ci1), 82.256692620435118, 9)
ci1 = fci.addons.symmetrize_wfn(ci0, norb, nelec, orbsym, wfnsym=3)
ci1 = cis.contract_2e(g2e, ci1, norb, nelec, wfnsym=3)
self.assertAlmostEqual(numpy.linalg.norm(ci1), 81.343382883053323, 9)
def test_kernel(self):
e, c = fci.direct_spin1_symm.kernel(h1e, g2e, norb, nelec, orbsym=orbsym)
self.assertAlmostEqual(e, -84.200905534209554, 8)
e = fci.direct_spin1_symm.energy(h1e, g2e, c, norb, nelec)
self.assertAlmostEqual(e, -84.200905534209554, 8)
def test_fci_spin_square_nroots(self):
mol = gto.M(
verbose = 0,
atom = '''
O 0. 0. 0.
H 0. -0.757 0.587
H 0. 0.757 0.587''',
basis = '631g',
symmetry = 1)
m = scf.RHF(mol).set(conv_tol=1e-15).run()
mc = mcscf.casci_symm.CASCI(m, 4, (2, 0))
mc.fcisolver.nroots = 2
mc.kernel()[0]
self.assertTrue(len(mc.e_tot) == 1)
ss = mc.fcisolver.spin_square(mc.ci[0], mc.ncas, mc.nelecas)
self.assertAlmostEqual(ss[0], 2, 9)
mc = mcscf.casci.CASCI(m, 4, (2, 0))
mc.fcisolver.nroots = 2
mc.kernel()[0]
ss = mc.fcisolver.spin_square(mc.ci[1], mc.ncas, mc.nelecas)
self.assertAlmostEqual(ss[0], 2, 9)
if __name__ == "__main__":
print("Full Tests for spin1-symm")
unittest.main()
|
gkc1000/pyscf
|
pyscf/fci/test/test_spin1_symm.py
|
Python
|
apache-2.0
| 3,685
|
[
"PySCF"
] |
ecc1dc307b4cdce85684c1fe7cd5ae958263bcb1de6bd9ca8e44a2e8253487cb
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
from __future__ import division
import nltk
# ------------------------------------------------------------------------------
# Work through the http://nltk.org/book/
# -- gene at ology dot net not dot com
#
# Handy links:
# http://nltk.org/
# http://stackoverflow.com/questions/4092994/unable-to-install-matplotlib-on-mac-os-x
# ------------------------------------------------------------------------------
from nltk.corpus import gutenberg
# Show available texts.
gutenberg.fileids()
alice = gutenberg.words('carroll-alice.txt')
bible = gutenberg.words('bible-kjv.txt')
hamlet = gutenberg.words('shakespeare-hamlet.txt')
macbeth = gutenberg.words('shakespeare-macbeth.txt')
# Average word & sentence length, and the number of times each vocabulary item
# appears in the text on average (lexical diversity).
for fileid in gutenberg.fileids():
num_chars = len(gutenberg.raw(fileid))
num_words = len(gutenberg.words(fileid))
num_sents = len(gutenberg.sents(fileid))
num_vocab = len(set([w.lower() for w in gutenberg.words(fileid)]))
print int(num_chars/num_words), int(num_words/num_sents), int(num_words/num_vocab), fileid
# Fondling sentences.
macbeth_sentences = gutenberg.sents('shakespeare-macbeth.txt')
longest_len = max([len(s) for s in macbeth_sentences])
[s for s in macbeth_sentences if len(s) == longest_len]
# Brown!
from nltk.corpus import brown
brown.categories()
genres = ['news', 'religion', 'hobbies', 'science_fiction', 'romance', 'humor']
modals = ['can', 'could', 'may', 'might', 'must', 'will']
whords = ['what', 'when', 'where', 'who', 'why']
scifi = brown.words(categories=genres[3])
#scifi = brown.words(fileids=['cm01'])
#religion = brown.words(categories=genres[1])
#religion_scifi = brown.words(categories=[genres[1], genres[3]])
fdist = nltk.FreqDist([w.lower() for w in scifi])
for m in modals:
print m + ':', fdist[m]
for w in whords:
print w + ':', fdist[w],
# Conditional frequency distribution
cfd = nltk.ConditionalFreqDist(
(genre, word)
for genre in brown.categories()
for word in brown.words(categories=genre)
)
cfd.tabulate(conditions=genres, samples=modals)
cfd.tabulate(conditions=genres, samples=whords)
# Reuters overlapping news test and training texts.
from nltk.corpus import reuters
len(reuters.fileids()) # number of files
reuters.fileids()[3017:3021] # filenames at the test-training transition.
reuters.categories()
metals = ['copper', 'gold', 'iron-steel', 'lead', 'nickel', 'palladium',
'platinum', 'silver', 'strategic-metal', 'tin', 'zinc']
len(reuters.fileids(metals))
# Show freq.dist. for words in metals.
for m in metals:
    fdistm = nltk.FreqDist([w.lower() for w in reuters.words(reuters.fileids(m)) if w.isalpha()])
vocabm = fdistm.keys()
vocabm[:20]
# Inaugural Address Corpus
from nltk.corpus import inaugural
inaugural.fileids()
def cond_freq_dist(text, target1, target2):
cfd = nltk.ConditionalFreqDist(
(target, fileid[:4]) # word-target, address-year
for fileid in text.fileids() # inagural address
for w in text.words(fileid) # all words in the address
for target in [target1, target2] # for each word
if w.lower().startswith(target)) # ...that is lower, etc.
cfd.plot()
cond_freq_dist(inaugural, 'america', 'citizen')
cond_freq_dist(inaugural, 'force', 'security')
cond_freq_dist(inaugural, 'freedom', 'security')
cond_freq_dist(inaugural, 'terror', 'safe')
# Corpora in Other Languages
from nltk.corpus import udhr
# List languages.
len([latin for latin in udhr.fileids() if latin.endswith('-Latin1')])
#190
len([utf8 for utf8 in udhr.fileids() if utf8.endswith('-UTF8')])
#93
[latin for latin in udhr.fileids() if 'french' in latin.lower()]
#['French_Francais-Latin1']
# Nested loop to get filenames.
languages = ['English', 'French', 'German', 'Italian', 'Spanish' ]
files = []
for file in udhr.fileids():
for lang in languages:
if lang in file:
files.append(file)
# List comprehension of same:
[file for file in udhr.fileids() for lang in languages if lang in file]
# ConditionalFreqDist for words in the given languages.
def udhr_cond_freq_dist(udhr, languages):
cfd = nltk.ConditionalFreqDist(
(lang, len(word))
for file in udhr.fileids()
for lang in languages if lang in file
for word in udhr.words(file))
cfd.plot()
udhr_cond_freq_dist(udhr, languages)
# Parsing methods.
raw = udhr.raw('English-Latin1')
raw[:50]
words = udhr.words('English-Latin1')
words[:10]
sents = udhr.sents('English-Latin1')
sents[0] #[u'Universal', u'Declaration', u'of', u'Human', u'Rights'...
# Loading your own Corpus
from nltk.corpus import PlaintextCorpusReader
corpus_root = '/usr/share/dict'
wordlists = PlaintextCorpusReader(corpus_root, '.*')
wordlists.fileids()
wordlists.words('connectives')
# 2.2 Conditional Frequency Distributions
from nltk.corpus import brown
cats = ['religion', 'science_fiction']
genre_word = [
(genre, word)
for genre in cats
for word in brown.words(categories=genre)
]
len(genre_word)
genre_word[:4]
genre_word[-4:]
cfd = nltk.ConditionalFreqDist(genre_word)
cfd
cfd.conditions()
cfd[cats[0]]
cfd[cats[1]] #<FreqDist with 3233 samples and 14470 outcomes>
len(list(cfd[cats[1]])) #3233
cfd[cats[1]]['could'] #49
# Plotting and Tabulating Distributions
from nltk.corpus import udhr
def udhr_cond_freq_dist(udhr, languages):
return nltk.ConditionalFreqDist(
(lang, len(word))
for file in udhr.fileids()
for lang in languages if lang in file
for word in udhr.words(file))
languages = [
'English',
'French',
'German',
'Italian',
'Spanish',
]
langs = ['English', 'German']
cfd = udhr_cond_freq_dist(udhr, languages)
cfd.tabulate(conditions=langs, samples=range(1,10), cumulative=True)
# Your Turn
from nltk.corpus import brown
cats = brown.categories() #['news', 'religion', 'science_fiction']
cfd = nltk.ConditionalFreqDist(
(genre, word)
for genre in cats
for word in brown.words(categories=genre)
)
days = [day + 'day' for day in ['Mon','Tues','Wednes','Thurs','Fri','Satur','Sun']]
cfd.tabulate(samples=days)
cfd.plot(samples=days)
# Generating Bigram Text
f = 'english-kjv.txt'
w = nltk.corpus.genesis.words(f)
b = nltk.bigrams(w)
cfd = nltk.ConditionalFreqDist(b)
def generate_model(cfdist, word, num=15):
for i in range(num):
print word,
word = cfdist[word].max() # XXX max() renders loops
generate_model(cfd, 'living')
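# Added sketch (not in the original notes): a sampled variant of generate_model
# that picks the next word in proportion to its bigram count, avoiding the
# repeating loops that max() produces above.
import random
def generate_model_sampled(cfdist, word, num=15):
    for i in range(num):
        print word,
        freqs = cfdist[word]
        candidates = [w for w in freqs for _ in range(freqs[w])]
        if not candidates:
            break
        word = random.choice(candidates)
    print
generate_model_sampled(cfd, 'living')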
f = 'carroll-alice.txt'
w = nltk.corpus.gutenberg.words(f)
b = nltk.bigrams(w)
cfd = nltk.ConditionalFreqDist(b)
cfd['rabbit'] #<FreqDist with 3 samples and 5 outcomes>
cfd['Rabbit'] #<FreqDist with 30 samples and 45 outcomes>
print cfd['Rabbit'] #<FreqDist: ',': 8, "'": 4, 'blew': 2, 'came': 2...>
generate_model(cfd, 'Rabbit')
from nltk.corpus import PlaintextCorpusReader
corpus_root = '/Users/gene/Backed/Documents'
taow = PlaintextCorpusReader(corpus_root, 'artofwar.txt')
b = nltk.bigrams(taow.words())
len(b) #13037
cfd = nltk.ConditionalFreqDist(b)
generate_model(cfd, 'enemy') # enemy ' s own men , and the enemy ' s own men , and
# Functions
f = open('/Users/gene/Backed/Documents/artofwar.txt')
raw = f.read()
def lexical_diversity(text):
word_count = len(text)
vocab_size = len(set(text))
lexical_diversity = word_count / vocab_size
return lexical_diversity
lexical_diversity(raw)
# XXX Lame:
def plural(word):
if word.endswith('y'):
return word[:-1] + 'ies'
elif word[-1] in 'sx' or word[-2:] in ['sh', 'ch']:
return word + 'es'
elif word.endswith('an'):
return word[:-2] + 'en'
else:
return word + 's'
plural('boy') #'boies'
plural('fan') #'fen'
# Modules
# local import:
from textproc import plural
plural('boy') #'boies'
plural('fan') #'fen'
# Wordlist Corpora
def unusual_words(text):
text_vocab = set(w.lower() for w in text if w.isalpha())
english_vocab = set(w.lower() for w in nltk.corpus.words.words())
unusual = text_vocab.difference(english_vocab)
return sorted(unusual)
unusual_words(nltk.corpus.gutenberg.words('austen-sense.txt'))
from nltk.corpus import stopwords
stopwords.words('english')
def content_fraction(text):
stopwords = nltk.corpus.stopwords.words('english')
    content = [w for w in text if w.isalpha() and w.lower() not in stopwords]
return len(content) / len(text)
content_fraction(nltk.corpus.reuters.words())
# Word Puzzle
puzzle_letters = nltk.FreqDist('egivrvonl')
obligatory = 'r'
wordlist = nltk.corpus.words.words()
[w for w in wordlist if len(w) >= 6
and obligatory in w
and nltk.FreqDist(w) <= puzzle_letters]
# Personal names.
names = nltk.corpus.names
names.fileids() #['female.txt', 'male.txt']
male_names = names.words('male.txt')
female_names = names.words('female.txt')
androgenous = [w for w in male_names if w in female_names]
len(androgenous) #365
cfd = nltk.ConditionalFreqDist(
(fileid, name[-1])
for fileid in names.fileids()
for name in names.words(fileid)
)
cfd.plot()
# Pronounciation database entries.
# word x phonetic code list ("phones")
# Where phone digits represent primary stress (1), secondary stress (2) & no
# stress (0).
entries = nltk.corpus.cmudict.entries()
len(entries) #133737
entries[133736] #('zywicki', ['Z', 'IH0', 'W', 'IH1', 'K', 'IY0'])
i = 39943
j = 9
for entry in entries[ i : i + j ]:
print entry
[e for e in entries if e[0] == 'gene'] # [('gene', ['JH', 'IY1', 'N'])]
[e for e in entries if e[0].startswith('gene')] # [('gene', ['JH', 'IY1', 'N'])]
syllable = ['N', 'IH0', 'K', 'S']
[word for word, pron in entries if pron[-4:] == syllable]
syllable = ['IY1', 'N']
[word for word, pron in entries if pron[-2:] == syllable]
# Show words that end with a "silent n."
[w for w, pron in entries if pron[-1] == 'M' and w[-1] == 'n']
# Show the sorted set of initial letters of words that sound like they start
# with an 'N' but actually don't.
sorted(set(w[:2] for w, pron in entries if pron[0] == 'N' and w[0] != 'n'))
#['gn', 'kn', 'mn', 'pn']
# Return the syllable stress list.
def stress(pron):
return [char for phone in pron for char in phone if char.isdigit()]
[w for w, pron in entries if stress(pron) == ['0', '1', '0', '2', '0']]
[w for w, pron in entries if stress(pron) == ['0', '2', '0', '1', '0']]
[w for w, pron in entries if stress(pron) == ['0', '0']]
# Pronunciation dictionary.
prondict = nltk.corpus.cmudict.dict()
prondict['fire'] #[['F', 'AY1', 'ER0'], ['F', 'AY1', 'R']]
prondict['foo'] #[['F', 'UW1']]
prondict['foobar'] #[['F', 'UW1', 'B', 'AA1', 'R']]
# prondict['foobarbaz'] raises a KeyError until it is added below:
#Traceback (most recent call last):
#  File "<stdin>", line 1, in <module>
#KeyError: 'foobarbaz'
prondict['jazz'] # [['JH', 'AE1', 'Z']]
prondict['foobarbaz'] = [['F', 'UW1', 'B', 'AA1', 'R', 'B', 'AE1', 'Z']]
# Kraftwerk ftw.
text = ['bowing', 'boom', 'chalk']
[ph for w in text for ph in prondict[w][0]]
#['B', 'OW1', 'IH0', 'NG', 'B', 'UW1', 'M', 'CH', 'AA1', 'K']
prondict['boing'] = [['B', 'OY1', 'N']]
text = ['boing', 'boom', 'chuck']
[ph for w in text for ph in prondict[w][0]]
#['B', 'OY1', 'N', 'B', 'UW1', 'M', 'CH', 'AH1', 'K']
# Comparative Wordlists
from nltk.corpus import swadesh
len(swadesh.words('en')) #207
en2fr = swadesh.entries(['en', 'fr'])
translate = dict(en2fr)
translate['one'] #'un'
translate['two'] #'deux'
translate['three'] #'trois'
# Swap wordlists.
en2es = swadesh.entries(['en', 'es'])
translate.update(dict(en2es))
translate['dog'] #'perro'
translate.update(dict(en2fr))
translate['dog'] #'chien'
# Show common entries.
languages = ['en', 'de', 'nl', 'es', 'fr', 'pt', 'la']
entry = range(139, 143)
for i in entry:
print swadesh.entries(languages)[i]
# Toolbox Lexicon
# http://www.sil.org/computing/toolbox/
from nltk.corpus import toolbox
toolbox.entries('rotokas.dic')
# TODO Find/build useful toolbox.dic files.
# WordNet
from nltk.corpus import wordnet as wn
# synset, synsets: synonym set
# lemma: collection of synonymous words
syn = 'car.n.01'
wn.synsets('motorcar')
wn.synset(syn).lemma_names #['car', 'auto', 'automobile', 'machine', 'motorcar']
wn.synset(syn).definition #'a motor vehicle with four wheels;...
wn.synset(syn).examples #['he needs a car to get to work']
wn.synset(syn).lemmas #[Lemma('car.n.01.car'), Lemma('car.n.01.auto'), ...
wn.lemma(syn + '.automobile') #Lemma('car.n.01.automobile')
wn.lemma(syn + '.automobile').synset #Synset('car.n.01')
wn.lemma(syn + '.automobile').name #'automobile'
wn.synsets('car') #[Synset('car.n.01'), Synset('car.n.02'),...
for synset in wn.synsets('car'):
print synset.lemma_names
#['car', 'auto', 'automobile', 'machine', 'motorcar']
#['car', 'railcar', 'railway_car', 'railroad_car']
#['car', 'gondola']
#['car', 'elevator_car']
#['cable_car', 'car']
wn.lemmas('car') #[Lemma('car.n.01.car'), Lemma('car.n.02.car'),...
# The WordNet Hierarchy
#
# Hypernyms and hyponyms are lexical relations that relate synsets. These
# relations navigate up and down the "is-a" path hierarchy.
# Hyponym: conceptually "more specific"
# Hypernym: conceptually "more general"
# Hypernym paths lead to more general terms.
cars = wn.synset('car.n.01')
car_types = cars.hyponyms()
car_types[26] #Synset('ambulance.n.01')
sorted([lemma.name for synset in car_types for lemma in synset.lemmas])
#['Model_T', 'S.U.V.', 'SUV'...'used-car', 'waggon', 'wagon']
cars.hypernyms()
paths = cars.hypernym_paths()
len(paths) #2
# wheeled_vehicle.n.01 is classed as both vehicle & container.
[synset.name for synset in paths[0]]
[synset.name for synset in paths[1]]
cars.root_hypernyms()
# Browse WN.
nltk.app.wordnet() # W00! Very cool!
# More Lexical Relations
# meronyms: Parts, kinds of
wn.synset('tree.n.01').part_meronyms()
wn.synset('tree.n.01').substance_meronyms()
# holonyms: Groups of
wn.synset('tree.n.01').member_holonyms()
# Inspect the relations of the word "mint."
for synset in wn.synsets('mint', wn.NOUN):
print synset.name + ':', synset.definition
# The leaves are a part of the plant.
wn.synset('mint.n.04').part_holonyms() #[Synset('mint.n.02')]
# A derived candy from the plant.
wn.synset('mint.n.04').substance_holonyms() #[Synset('mint.n.05')]
# Verb entailments.
wn.synset('walk.v.01').entailments() #[Synset('step.v.01')]
wn.synset('eat.v.01').entailments()
#[Synset('swallow.v.01'), Synset('chew.v.01')]
wn.synset('tease.v.03').entailments()
#[Synset('arouse.v.07'), Synset('disappoint.v.01')]
# Lemma relations.
wn.lemma('supply.n.02.supply').antonyms()
#[Lemma('demand.n.02.demand')]
wn.lemma('rush.v.01.rush').antonyms()
#[Lemma('linger.v.04.linger')]
wn.lemma('horizontal.a.01.horizontal').antonyms()
#[Lemma('vertical.a.01.vertical'), Lemma('inclined.a.02.inclined')]
wn.lemma('staccato.r.01.staccato').antonyms()
#[Lemma('legato.r.01.legato')]
# Methods defined on a synset.
dir(wn.synset('harmony.n.02'))
# ['__class__', '__delattr__' ... 'verb_groups', 'wup_similarity']
help(wn) # Also handy.
# Semantic Similarity.
right = wn.synset('right_whale.n.01')
things = ['orca', 'minke_whale', 'tortoise', 'novel']
i = 0
# Show common conceptual ancestors and respective metrics.
for thing in things:
i += 1
syns = wn.synset(thing + '.n.01')
print '%d path. %s %s = %.4f similarity' % (i, right, syns, right.path_similarity(syns))
for hyp in right.lowest_common_hypernyms(syns):
min = hyp.min_depth()
print '\t%s = %d synset depth' % (hyp, min)
# Verbs!
from nltk.corpus import verbnet as vn
vn #<VerbnetCorpusReader in '.../corpora/verbnet' (not loaded yet)>
# XXX Can't make this work, even after reading the source (with no docs).
# 2.8 Exercises
# 2.
austen = nltk.corpus.gutenberg.words('austen-sense.txt')
len(austen) #141576
len(set(austen)) #6833
# 4.
from nltk.corpus import state_union
state_union.fileids()
def cond_freq_dist(text, targets):
cfd = nltk.ConditionalFreqDist(
(target, fileid[:4]) # word target, file name year
for fileid in text.fileids() # text file name
for w in text.words(fileid) # all words in the address
for target in targets # all targets
if w.lower().startswith(target)) # ...that is lower, etc.
cfd.plot()
targets = ['people','man','woman']
cond_freq_dist(state_union, targets)
targets = ['terror','freedom','secur','priv']
cond_freq_dist(state_union, targets)
# 5.
from nltk.corpus import wordnet as wn
# Meronyms: Parts, kinds of
#{member,part,substance}_meronyms()
# Holonyms: Groups of
#{member,part,substance}_holonyms()
concept = 'think'
for synset in wn.synsets(concept, wn.VERB):
print synset.name + ':', synset.definition
syn = 'shopfront.n.01'
wn.synset(syn).member_meronyms()
wn.synset(syn).part_meronyms()
wn.synset(syn).substance_meronyms()
wn.synset(syn).member_holonyms()
wn.synset(syn).part_holonyms()
wn.synset(syn).substance_holonyms()
# 7.
text = nltk.Text(state_union.words())
text.concordance('however') # . However , = 25. , however , = 67.
# 8. Already done above
# 9. Words in common (but with different usage-meaning)
nltk.corpus.gutenberg.fileids() # What are the file names again?
austen = nltk.corpus.gutenberg.words('austen-sense.txt')
austen_vocab = set([w.lower() for w in austen if w.isalpha()])
moby = nltk.corpus.gutenberg.words('melville-moby_dick.txt')
moby_vocab = set([w.lower() for w in moby if w.isalpha()])
shared_vocab = [a for a in austen_vocab for m in moby_vocab if a == m]
len(shared_vocab) #4354
# TODO Use WN to puzzle-out "different usage-meaning?"
# 10. "word types?" As in ...stemming? XXX This was never discussed.
# # Hyponym: conceptually "more specific" ^ Maybe
# 11. Already done above
# 12.
prondict = nltk.corpus.cmudict.dict()
len(prondict) #123455
len(set(prondict)) #123455 - All distinct.
alphas = [w for w in prondict if w.isalpha()]
len(alphas) #115533
multi_pron = [w for w in alphas if len(prondict[w]) > 1]
len(multi_pron) #8492
8492 / 115533 #7.4% of words have multiple pronunciations.
# 13. Noun synsets without hyponyms.
from nltk.corpus import wordnet as wn
for s in wn.all_synsets('n'):
if len(s.hyponyms()) == 0:
print s
# 14. Show word: defn & any {hyper,hypo}nyms with defns.
def supergloss(s):
print "%s: %s" % (wn.synset(syn).lemma_names[0], s.definition)
if len(s.hypernyms()) > 0:
print "Hypernyms:"
for hyp in s.hypernyms():
print "\t%s: %s" % (hyp.lemma_names[0], hyp.definition)
print "Hyponyms:"
for hyp in s.hyponyms():
print "\t%s: %s" % (hyp.lemma_names[0], hyp.definition)
syn = 'car.n.01'
supergloss(wn.synset(syn))
# 15. All words that occur at least X times in document Y.
from nltk.corpus import brown
adventure = brown.words(categories=brown.categories()[0])
scifi = brown.words(categories=brown.categories()[-1])
len(scifi) #14470
len([w.lower() for w in scifi if w.isalpha()]) #11762
len(set([w.lower() for w in scifi if w.isalpha()])) #2870
# Return a list of pair-lists for "normalized" word occurances.
def occurance(n, text):
fdist = nltk.FreqDist([w.lower() for w in text if w.isalpha()])
occurance = []
i = 0
for word in fdist.keys():
if fdist[word] >= n:
occurance.append([word, fdist[word]])
i += 1
print '%d. %s: %d' % (i, word, fdist[word])
return occurance
at_least = occurance(700, scifi) #1. the: 723
at_least = occurance(300, scifi) #1. the: 723 2. of: 329 3. to: 306
# 16. Categorical lexical diversity.
#print 'Genre, Tokens, Types, Lexical diversity'
for genre in brown.categories():
w = brown.words(categories=genre)
x = len(w)
y = len(set(w))
print '%s, %d, %d, %f' % (genre, x, y, (x / y))
# 17. most frequently occurring words of a text that are not stopwords
from nltk.corpus import stopwords
stopwords = nltk.corpus.stopwords.words('english')
def freq_occuring_words(n, text, stop):
content = [w.lower() for w in text if w.isalpha() and w.lower() not in stopwords]
fd = nltk.FreqDist(content)
print fd.keys()[:n + 1]
freq_occuring_words(50, adventure, stopwords)
# 18. print the 50 most frequent non-stopword bigrams
def freq_occuring_bigrams(n, words, stop):
bigrams = nltk.bigrams(words)
cfd = nltk.FreqDist(
(w1, w2)
for (w1, w2) in bigrams
if w1.isalpha() and w1 not in stop
and w2.isalpha() and w2 not in stop
)
print cfd.keys()[:n + 1]
freq_occuring_bigrams(20, adventure, stopwords)
freq_occuring_bigrams(20, scifi, stopwords)
freq_occuring_bigrams(20, scifi + adventure, stopwords)
# 19. create a table of word frequencies by genre. find words whose presence (or
# absence) is typical of a genre.
cats = brown.categories()
subj_prons = ['i', 'he', 'she', 'it', 'they', 'we', 'you']
obj_prons = ['me', 'him', 'her', 'it', 'them', 'us', 'you']
# XXX 'it' and 'you' are ambiguous without grammatical analysis.
cfd = nltk.ConditionalFreqDist(
(genre, word.lower())
for genre in cats
for word in brown.words(categories=genre)
if word.isalpha()
)
cfd.tabulate(conditions=cats, samples=subj_prons)
cfd.tabulate(conditions=cats, samples=obj_prons)
# ^ Fascinating!
# 20. compute the freq of the word in the brown genre.
def brown_word_freq(word, genre):
fd = nltk.FreqDist([w.lower() for w in brown.words(categories=genre) if w.isalpha()])
print fd[word]
brown_word_freq('he', 'religion')
brown_word_freq('she', 'religion')
# ^ This shows the same as the religion row of pronoun table, in 19 above.
# 21. guess the number of syllables contained in a text
from nltk.corpus import cmudict
dict = cmudict.dict()
words = cmudict.words()
def syllable_guess(text, dict, words):
syllables = 0
for token in text:
if token.isalpha() and token.lower() in words:
#print len(dict[token.lower()][0])
syllables += len(dict[token.lower()][0])
print 'Syllables: ', syllables
syllable_guess(adventure, dict, words) #1443
# ^ That takes almost forever. :\
from time import time
start_time = time();
syllable_guess(adventure, dict, words);
print time() - start_time, "seconds"
#101.207468033 seconds - yep
syllable_guess(scifi, dict, words); #42177
# 21.222469807 seconds
# 22. Put string M between every Nth word in a text.
def hedge(text, string='F00', n=3):
hedge = []
i = 0
for word in text:
if i != 0 and not i % n:
hedge.append(string)
i += 1
hedge.append(word)
return hedge
h = hedge(scifi)
h[:20]
h = hedge(scifi, 'like') # Ha
h = hedge(scifi, 'like', 4) # Haha
# 23. Zipf's Law "frequency of a word type is inversely proportional to its
# rank." Example: the 50th most common word should occur three times as
# frequently as the 150th most common word.
def zipf(text):
fd = nltk.FreqDist([w.lower() for w in text if w.isalpha()])
return fd
z = zipf(scifi)
z.B() #2870
z.N() #11762
z.max() #'the'
z['the'] #723
z.Nr(723) #1 word occurs 723 times
z.Nr(10) #14 words occur 10 times each
z.freq('the') #0.061469137901717395
z.keys()[:10] #['the', 'of', 'to', 'and', 'a', 'was', 'he', 'in', 'it', 'had']
z.samples()[:10] #['the', 'of', 'to', 'and', 'a', 'was', 'he', 'in', 'it', 'had']
z.values()[:10] #[723, 329, 306, 294, 236, 200, 191, 164, 158, 143]
z.items()[:5] #[('the', 723), ('of', 329), ('to', 306), ('and', 294), ('a', 236)]
z.hapaxes()[:5] #['ab', 'abc', 'abdomen', 'abdominal', 'absentmindedly']
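# Added sketch (not in the original notes): a quick sanity check of Zipf's law;
# rank * count should stay in the same ballpark for the top-ranked words.
for rank, word in enumerate(z.keys()[:10]):
    print rank + 1, word, (rank + 1) * z[word]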
# 23.1.
import matplotlib.pyplot as plt
nonhap = [w for w in z.samples() if w not in z.hapaxes()] #len()==1095
x = 400 #len(nonhap)
y = z.values()[:x] #[z.freq(w) for w in nonhap] # <- Identical
plt.plot(range(x), y)
#[<matplotlib.lines.Line2D object at 0x107fa3390>]
plt.show()
# 23.2. Tokenize a string of random words & plot as before.
import random
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
max = 0
for w in nonhap:
max += len(w)
max += len(nonhap)
string = ''
for i in range(max):
for char in random.choice('abcdefg '):
string += char
len(string) #7439
tokens = word_tokenize(string)
z = zipf(tokens)
x = len(set(tokens))
y = z.values()[:x] #[z.freq(w) for w in nonhap] # <- Identical
plt.plot(range(x), y)
plt.show()
# Conclusion: Graph is jagged but sort-of imitates the smooth 23.1 graph.
# 24. Modify the text generation program to:
# 24.1. Generate a set of quasi-random phrases from the top N most likely words.
def generate_model(text, top=10, tokens=5):
# Only consider the words ("alpha tokens") of more than one letter.
words = [w.lower() for w in text if len(w) > 1 and w.isalpha() and not w.isupper()]
# Compute the frequency distribution of non-stopwords.
stopwords = nltk.corpus.stopwords.words('english')
fdist = nltk.FreqDist([w for w in words if w.lower() not in stopwords])
# Compute the conditional frequency distribution of adjacent word pairs.
cfdist = nltk.ConditionalFreqDist(nltk.bigrams(words))
# Generate a phrase for each of the most frequent non-stopwords.
for w in fdist.keys()[:top]:
# Produce a phrase.
for i in range(tokens):
print w,
# Choose a frequently adjacent word, for the next phrase word.
w = random.choice(list(cfdist[w]))
# Output a newline at the end of a phrase.
print
alice = nltk.corpus.gutenberg.words('carroll-alice.txt')
generate_model(alice)
generate_model(alice, 10, 10)
genesis = nltk.corpus.genesis.words('english-kjv.txt')
generate_model(genesis) # quasischizobiblical
generate_model(genesis, 10, 1) # <- Most frequent non-stopwords
generate_model(genesis, 10, 9) # <- 10, 9 word phrases
# 24.2. Train the model on a corpus
# XXX "Train?" This topic is not discussed, or even alluded to, in the chapter.
from nltk.corpus import brown
scifi = brown.words(categories='science_fiction')
generate_model(scifi, 10, 10)
# 24.3. Train the model on a hybrid corpus
generate_model(genesis + scifi, 20, 9) # <- Sometimes non-sequitur-funny
mystery = brown.words(categories='mystery')
generate_model(genesis + mystery, 20, 9)
adventure = brown.words(categories='adventure')
generate_model(genesis + adventure, 20, 9)
humor = brown.words(categories='humor')
generate_model(genesis + humor, 20, 9)
learned = brown.words(categories='learned')
generate_model(genesis + learned, 20, 9)
# Genesis is not as rich, so its words are the most frequent, and therefore
# are the 1st words of the phrase. But then the richer text "takes over."
# 25. Return UDHR languages that have a given string as a word.
from nltk.corpus import udhr
def find_language(string):
langs = []
for file in udhr.fileids():
if file.endswith('-Latin1'):
print 'Checking', file + '...'
if string.lower() in [w.lower() for w in udhr.words(file)]:
langs.append(file)
print langs
find_language('equal')
# 26. branching factor of the noun hypernym hierarchy?
from nltk.corpus import wordnet as wn
def branch_factor(pos='n'):
# Initialize the counters.
hypo_num = 0
hypo_sum = 0
hyper_num = 0
hyper_sum = 0
# Tally the hyp*s for each synset.
for syn in wn.all_synsets(pos):
hypo_len = len(syn.hyponyms())
hyper_len = len(syn.hypernyms())
if hypo_len:
hypo_num += 1
hypo_sum += hypo_len
if hyper_len:
hyper_num += 1
hyper_sum += hypo_len
print '%s - Hyponyms: %d, Hypernyms: %d' % (syn.lemmas[0].name, hypo_len, hyper_len)
# Compute the averages.
hypo_avg = 0 if hypo_num == 0 else hypo_sum / hypo_num
hyper_avg = 0 if hyper_num == 0 else hyper_sum / hyper_num
# Output the results.
print 'POS: %s, Hyponyms: n=%d, t=%d, avg=%.3f' % (pos, hypo_num, hypo_sum, hypo_avg)
print 'POS: %s, Hypernyms: n=%d, t=%d, avg=%.3f' % (pos, hyper_num, hyper_sum, hyper_avg)
branch_factor()
#POS: n, Hyponyms: n=16693, t=75850, avg=4.544
#POS: n, Hypernyms: n=74389, t=75831, avg=1.019
# 27. Compute average polysemy of n, v, adj & adv in WN.
branch_factor(pos=wn.VERB)
#POS: v, Hyponyms: n=3315, t=13239, avg=3.994
#POS: v, Hypernyms: n=13208, t=9949, avg=0.753
branch_factor(pos=wn.ADV)
#POS: r, Hyponyms: n=0, t=0, avg=0.000
#POS: r, Hypernyms: n=0, t=0, avg=0.000
branch_factor(pos=wn.ADJ)
#POS: a, Hyponyms: n=0, t=0, avg=0.000
#POS: a, Hypernyms: n=0, t=0, avg=0.000
# XXX r & a == 0?
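# Added sketch (not in the original notes): branch_factor() measures hypernym/
# hyponym branching, not polysemy, and adjectives/adverbs have no hypernym
# hierarchy, which is why it reports 0 for them. Average polysemy can instead
# be computed as total senses over distinct lemma names for a POS (slow).
def avg_polysemy(pos):
    lemma_names = set(l.name for s in wn.all_synsets(pos) for l in s.lemmas)
    senses = sum(len(wn.synsets(name, pos)) for name in lemma_names)
    return senses / len(lemma_names)
print avg_polysemy(wn.NOUN), avg_polysemy(wn.VERB), avg_polysemy(wn.ADJ), avg_polysemy(wn.ADV)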
# 28. Rank pairs in order of decreasing similarity.
string = 'car-automobile, gem-jewel, journey-voyage, boy-lad, coast-shore, asylum-madhouse, magician-wizard, midday-noon, furnace-stove, food-fruit, bird-cock, bird-crane, tool-implement, brother-monk, lad-brother, crane-implement, journey-car, monk-oracle, cemetery-woodland, food-rooster, coast-hill, forest-graveyard, shore-woodland, monk-slave, coast-forest, lad-wizard, chord-smile, glass-magician, rooster-voyage, noon-string'
pairs = string.split(', ')
# TODO Split on ',' and then trim instead?
# Make a dict of the pairs with path_similarity as value.
# Then sort by value, then sort by key.
sims = dict()
for item in pairs:
pair = list(item.split('-'))
i = wn.synsets(pair[0])[0]
j = wn.synsets(pair[1])[0]
sims[item] = i.path_similarity(j)
print i, j, i.lowest_common_hypernyms(j), i.path_similarity(j)
#Synset('car.n.01') Synset('car.n.01') [Synset('car.n.01')] 1.0
#...
#Synset('noon.n.01') Synset('string.n.01') [Synset('entity.n.01')] 0.0588235294118
sims
#{'midday-noon': 1.0, 'cemetery-woodland': 0.1111111111111111, 'journey-car': 0.05, 'crane-implement': 0.1, 'noon-string': 0.058823529411764705, 'bird-crane': 0.1111111111111111, 'glass-magician': 0.1111111111111111, 'forest-graveyard': 0.07142857142857142, 'brother-monk': 0.125, 'monk-oracle': 0.125, 'chord-smile': 0.09090909090909091, 'bird-cock': 0.0625, 'food-fruit': 0.09090909090909091, 'boy-lad': 0.3333333333333333, 'furnace-stove': 0.07692307692307693, 'coast-hill': 0.2, 'lad-wizard': 0.2, 'monk-slave': 0.2, 'asylum-madhouse': 0.125, 'tool-implement': 0.5, 'shore-woodland': 0.2, 'lad-brother': 0.14285714285714285, 'magician-wizard': 0.16666666666666666, 'journey-voyage': 0.25, 'coast-forest': 0.09090909090909091, 'gem-jewel': 0.125, 'rooster-voyage': 0.041666666666666664, 'food-rooster': 0.0625, 'coast-shore': 0.5, 'car-automobile': 1.0}
# http://stackoverflow.com/questions/3417760/how-to-sort-a-python-dict-by-value
list(sorted(sims, key=sims.__getitem__, reverse=True)) # XXX Too advanced for ch2 ;)
#['midday-noon', 'car-automobile', 'tool-implement', 'coast-shore', 'boy-lad', 'journey-voyage', 'coast-hill', 'lad-wizard', 'monk-slave', 'shore-woodland', 'magician-wizard', 'lad-brother', 'brother-monk', 'monk-oracle', 'asylum-madhouse', 'gem-jewel', 'cemetery-woodland', 'bird-crane', 'glass-magician', 'crane-implement', 'chord-smile', 'food-fruit', 'coast-forest', 'furnace-stove', 'forest-graveyard', 'bird-cock', 'food-rooster', 'noon-string', 'journey-car', 'rooster-voyage']
# Build a list of string pairs, sorted by path_similarity || alpha of pair.
path_sims = []
for item in pairs:
pair = list(item.split('-'))
i = wn.synsets(pair[0])[0]
j = wn.synsets(pair[1])[0]
k = i.path_similarity(j)
path_sims.insert(0, (round(k, 4), item)) # insert(0, ...) prepends, so the list ends up in reverse input order
def or_cmp(a, b):
x = 0
if a[0] == b[0]:
x = cmp(b[1], a[1])
else:
x = cmp(a[0], b[0])
return x
sorted(path_sims, cmp=or_cmp, reverse=True)
#[(1.0, 'car-automobile'), (1.0, 'midday-noon'), (0.5, 'coast-shore'), (0.5, 'tool-implement'), (0.3333, 'boy-lad'), (0.25, 'journey-voyage'), (0.2, 'coast-hill'), (0.2, 'lad-wizard'), (0.2, 'monk-slave'), (0.2, 'shore-woodland'), (0.1667, 'magician-wizard'), (0.1429, 'lad-brother'), (0.125, 'asylum-madhouse'), (0.125, 'brother-monk'), (0.125, 'gem-jewel'), (0.125, 'monk-oracle'), (0.1111, 'bird-crane'), (0.1111, 'cemetery-woodland'), (0.1111, 'glass-magician'), (0.1, 'crane-implement'), (0.0909, 'chord-smile'), (0.0909, 'coast-forest'), (0.0909, 'food-fruit'), (0.0769, 'furnace-stove'), (0.0714, 'forest-graveyard'), (0.0625, 'bird-cock'), (0.0625, 'food-rooster'), (0.0588, 'noon-string'), (0.05, 'journey-car'), (0.0417, 'rooster-voyage')]
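# An equivalent key-based sort (also works on Python 3, where the cmp= argument is gone):
sorted(path_sims, key=lambda pair: (-pair[0], pair[1]))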
|
ology/NLTK-Study
|
ch02.py
|
Python
|
artistic-2.0
| 32,251
|
[
"ORCA"
] |
ddbffaf5b2158200d31abf5ebea6fc713ea981467f78ce3e52ef1a9c14023fef
|
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**IS Utilities implementation.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'tim@linfiniti.com'
__revision__ = '$Format:%H$'
__date__ = '29/01/2011'
__copyright__ = 'Copyright 2012, Australia Indonesia Facility for '
__copyright__ += 'Disaster Reduction'
import os
import sys
import traceback
import logging
import math
import numpy
import uuid
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import QCoreApplication
from qgis.core import (QGis,
QgsRasterLayer,
QgsMapLayer,
QgsCoordinateReferenceSystem,
QgsCoordinateTransform,
QgsGraduatedSymbolRendererV2,
QgsSymbolV2,
QgsRendererRangeV2,
QgsSymbolLayerV2Registry,
QgsColorRampShader,
QgsRasterTransparency,
QgsVectorLayer,
QgsFeature
)
from safe_interface import temp_dir
from safe_qgis.exceptions import (StyleError,
MethodUnavailableError,
MemoryLayerCreationError)
from safe_qgis.safe_interface import DEFAULTS, safeTr, get_version
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'third_party')))
from raven.handlers.logging import SentryHandler
from raven import Client
#do not remove this even if it is marked as unused by your IDE
#resources are used by htmlfooter and header the comment will mark it unused
#for pylint
import safe_qgis.resources # pylint: disable=W0611
LOGGER = logging.getLogger('InaSAFE')
def setVectorStyle(theQgisVectorLayer, theStyle):
"""Set QGIS vector style based on InaSAFE style dictionary.
For **opaque** a value of **0** can be used. For **fully transparent**, a
value of **100** can be used. The function should take care to scale the
transparency level to between 0 and 100.
Args:
* theQgisVectorLayer: QgsMapLayer
* theStyle: dict - Dictionary of the form as in the example below
Returns:
None - Sets and saves style for theQgisVectorLayer
Raises:
None
Example:
{'target_field': 'DMGLEVEL',
'style_classes':
[{'transparency': 1, 'max': 1.5, 'colour': '#fecc5c',
'min': 0.5, 'label': 'Low damage', 'size' : 1},
{'transparency': 55, 'max': 2.5, 'colour': '#fd8d3c',
'min': 1.5, 'label': 'Medium damage', 'size' : 1},
{'transparency': 80, 'max': 3.5, 'colour': '#f31a1c',
'min': 2.5, 'label': 'High damage', 'size' : 1}]}
.. note:: The transparency and size keys are optional. Size applies
to points only.
"""
myTargetField = theStyle['target_field']
myClasses = theStyle['style_classes']
myGeometryType = theQgisVectorLayer.geometryType()
myRangeList = []
for myClass in myClasses:
# Transparency 100: transparent
# Transparency 0: opaque
mySize = 2 # mm
if 'size' in myClass:
mySize = myClass['size']
myTransparencyPercent = 0
if 'transparency' in myClass:
myTransparencyPercent = myClass['transparency']
if 'min' not in myClass:
raise StyleError('Style info should provide a "min" entry')
if 'max' not in myClass:
raise StyleError('Style info should provide a "max" entry')
try:
myMin = float(myClass['min'])
except TypeError:
raise StyleError('Class break lower bound should be a number.'
'I got %s' % myClass['min'])
try:
myMax = float(myClass['max'])
except TypeError:
raise StyleError('Class break upper bound should be a number.'
'I got %s' % myClass['max'])
myColour = myClass['colour']
myLabel = myClass['label']
myColour = QtGui.QColor(myColour)
mySymbol = QgsSymbolV2.defaultSymbol(myGeometryType)
myColourString = "%s, %s, %s" % (
myColour.red(),
myColour.green(),
myColour.blue())
# Work around for the fact that QgsSimpleMarkerSymbolLayerV2
# python bindings are missing from the QGIS api.
# .. see:: http://hub.qgis.org/issues/4848
# We need to create a custom symbol layer as
# the border colour of a symbol can not be set otherwise
myRegistry = QgsSymbolLayerV2Registry.instance()
if myGeometryType == QGis.Point:
myMetadata = myRegistry.symbolLayerMetadata('SimpleMarker')
# note that you can get a list of available layer properties
# that you can set by doing e.g.
# QgsSimpleMarkerSymbolLayerV2.properties()
mySymbolLayer = myMetadata.createSymbolLayer({'color_border':
myColourString})
mySymbolLayer.setSize(mySize)
mySymbol.changeSymbolLayer(0, mySymbolLayer)
elif myGeometryType == QGis.Polygon:
myMetadata = myRegistry.symbolLayerMetadata('SimpleFill')
mySymbolLayer = myMetadata.createSymbolLayer({'color_border':
myColourString})
mySymbol.changeSymbolLayer(0, mySymbolLayer)
else:
# for lines we do nothing special as the property setting
# below should give us what we require.
pass
mySymbol.setColor(myColour)
# .. todo:: Check that vectors use alpha as % otherwise scale TS
# Convert transparency % to opacity
# alpha = 0: transparent
# alpha = 1: opaque
alpha = 1 - myTransparencyPercent / 100.0
mySymbol.setAlpha(alpha)
myRange = QgsRendererRangeV2(myMin,
myMax,
mySymbol,
myLabel)
myRangeList.append(myRange)
myRenderer = QgsGraduatedSymbolRendererV2('', myRangeList)
myRenderer.setMode(QgsGraduatedSymbolRendererV2.EqualInterval)
myRenderer.setClassAttribute(myTargetField)
theQgisVectorLayer.setRendererV2(myRenderer)
theQgisVectorLayer.saveDefaultStyle()
def setRasterStyle(theQgsRasterLayer, theStyle):
"""Set QGIS raster style based on InaSAFE style dictionary.
This function will set both the colour map and the transparency
for the passed in layer.
Args:
* theQgsRasterLayer: QgsRasterLayer
* style: dict - Dictionary of the form as in the example below.
Example:
style_classes = [dict(colour='#38A800', quantity=2, transparency=0),
dict(colour='#38A800', quantity=5, transparency=50),
dict(colour='#79C900', quantity=10, transparency=50),
dict(colour='#CEED00', quantity=20, transparency=50),
dict(colour='#FFCC00', quantity=50, transparency=34),
dict(colour='#FF6600', quantity=100, transparency=77),
dict(colour='#FF0000', quantity=200, transparency=24),
dict(colour='#7A0000', quantity=300, transparency=22)]
Returns:
list: RangeList
list: TransparencyList
"""
myNewStyles = _addMinMaxToStyle(theStyle['style_classes'])
# test if QGIS 1.8.0 or older
# see issue #259
if qgisVersion() <= 10800:
LOGGER.debug('Rendering raster using <= 1.8 styling')
return _setLegacyRasterStyle(theQgsRasterLayer, myNewStyles)
else:
LOGGER.debug('Rendering raster using 2+ styling')
return _setNewRasterStyle(theQgsRasterLayer, myNewStyles)
def _addMinMaxToStyle(theStyle):
"""Add a min and max to each style class in a style dictionary.
When InaSAFE provides style classes they are specific values, not ranges.
However QGIS wants to work in ranges, so this helper will address that by
updating the dictionary to include a min max value for each class.
It is assumed that we will start from 0 as the min for the first class
and the quantity of each class shall constitute the max. For all other
classes, min shall be the previous class's max plus the smallest increment
to a float that can meaningfully be made by python (as determined by
numpy.nextafter()).
Args:
style: list - A list of dictionaries of the form as in the example
below.
Returns:
dict: A new dictionary list with min max attributes added to each
entry.
Example input:
style_classes = [dict(colour='#38A800', quantity=2, transparency=0),
dict(colour='#38A800', quantity=5, transparency=50),
dict(colour='#79C900', quantity=10, transparency=50),
dict(colour='#CEED00', quantity=20, transparency=50),
dict(colour='#FFCC00', quantity=50, transparency=34),
dict(colour='#FF6600', quantity=100, transparency=77),
dict(colour='#FF0000', quantity=200, transparency=24),
dict(colour='#7A0000', quantity=300, transparency=22)]
Example output:
style_classes = [dict(colour='#38A800', quantity=2, transparency=0,
min=0, max=2),
dict(colour='#38A800', quantity=5, transparency=50,
min=2.0000000000002, max=5),
),
dict(colour='#79C900', quantity=10, transparency=50,
min=5.0000000000002, max=10),),
dict(colour='#CEED00', quantity=20, transparency=50,
min=5.0000000000002, max=20),),
dict(colour='#FFCC00', quantity=50, transparency=34,
min=20.0000000000002, max=50),),
dict(colour='#FF6600', quantity=100, transparency=77,
min=50.0000000000002, max=100),),
dict(colour='#FF0000', quantity=200, transparency=24,
min=100.0000000000002, max=200),),
dict(colour='#7A0000', quantity=300, transparency=22,
min=200.0000000000002, max=300),)]
"""
myNewStyles = []
myLastMax = 0.0
for myClass in theStyle:
myQuantity = float(myClass['quantity'])
myClass['min'] = myLastMax
myClass['max'] = myQuantity
myLastMax = numpy.nextafter(myQuantity, sys.float_info.max)
myNewStyles.append(myClass)
return myNewStyles
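# Illustrative example (hypothetical values, not taken from a real layer):
#   _addMinMaxToStyle([dict(colour='#38A800', quantity=2, transparency=0),
#                      dict(colour='#79C900', quantity=5, transparency=50)])
# returns the same two dicts with contiguous ranges added:
#   first class:  min=0.0, max=2.0
#   second class: min=numpy.nextafter(2.0, ...) ~= 2.0000000000000004, max=5.0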
def _setLegacyRasterStyle(theQgsRasterLayer, theStyle):
"""Set QGIS raster style based on InaSAFE style dictionary for QGIS < 2.0.
This function will set both the colour map and the transparency
for the passed in layer.
Args:
* theQgsRasterLayer: QgsRasterLayer.
* style: List - of the form as in the example below.
Returns:
* list: RangeList
* list: TransparencyList
Example:
style_classes = [dict(colour='#38A800', quantity=2, transparency=0),
dict(colour='#38A800', quantity=5, transparency=50),
dict(colour='#79C900', quantity=10, transparency=50),
dict(colour='#CEED00', quantity=20, transparency=50),
dict(colour='#FFCC00', quantity=50, transparency=34),
dict(colour='#FF6600', quantity=100, transparency=77),
dict(colour='#FF0000', quantity=200, transparency=24),
dict(colour='#7A0000', quantity=300, transparency=22)]
.. note:: There is currently a limitation in QGIS in that
pixel transparency values can not be specified in ranges and
consequently the opacity is of limited value and seems to
only work effectively with integer values.
"""
theQgsRasterLayer.setDrawingStyle(QgsRasterLayer.PalettedColor)
LOGGER.debug(theStyle)
myRangeList = []
myTransparencyList = []
myLastValue = 0
for myClass in theStyle:
LOGGER.debug('Evaluating class:\n%s\n' % myClass)
myMax = myClass['quantity']
myColour = QtGui.QColor(myClass['colour'])
myLabel = QtCore.QString()
if 'label' in myClass:
myLabel = QtCore.QString(myClass['label'])
myShader = QgsColorRampShader.ColorRampItem(myMax, myColour, myLabel)
myRangeList.append(myShader)
if math.isnan(myMax):
LOGGER.debug('Skipping class.')
continue
# Create opacity entries for this range
myTransparencyPercent = 0
if 'transparency' in myClass:
myTransparencyPercent = int(myClass['transparency'])
if myTransparencyPercent > 0:
# Always assign the transparency to the class' specified quantity
myPixel = \
QgsRasterTransparency.TransparentSingleValuePixel()
myPixel.pixelValue = myMax
myPixel.percentTransparent = myTransparencyPercent
myTransparencyList.append(myPixel)
# Check if range extrema are integers so we know if we can
# use them to calculate a value range
if ((myLastValue == int(myLastValue)) and (myMax == int(myMax))):
# Ensure that they are integers
# (e.g 2.0 must become 2, see issue #126)
myLastValue = int(myLastValue)
myMax = int(myMax)
# Set transparencies
myRange = range(myLastValue, myMax)
for myValue in myRange:
myPixel = \
QgsRasterTransparency.TransparentSingleValuePixel()
myPixel.pixelValue = myValue
myPixel.percentTransparent = myTransparencyPercent
myTransparencyList.append(myPixel)
#myLabel = myClass['label']
# Apply the shading algorithm and design their ramp
theQgsRasterLayer.setColorShadingAlgorithm(
QgsRasterLayer.ColorRampShader)
myFunction = theQgsRasterLayer.rasterShader().rasterShaderFunction()
# Discrete will shade any cell between maxima of this break
# and minima of previous break to the colour of this break
myFunction.setColorRampType(QgsColorRampShader.DISCRETE)
myFunction.setColorRampItemList(myRangeList)
# Now set the raster transparency
theQgsRasterLayer.rasterTransparency()\
.setTransparentSingleValuePixelList(myTransparencyList)
theQgsRasterLayer.saveDefaultStyle()
return myRangeList, myTransparencyList
def _setNewRasterStyle(theQgsRasterLayer, theClasses):
"""Set QGIS raster style based on InaSAFE style dictionary for QGIS >= 2.0.
This function will set both the colour map and the transparency
for the passed in layer.
Args:
* theQgsRasterLayer: QgsRasterLayer
* theClasses: List of the form as in the example below.
Returns:
* list: RangeList
* list: TransparencyList
Example:
style_classes = [dict(colour='#38A800', quantity=2, transparency=0),
dict(colour='#38A800', quantity=5, transparency=50),
dict(colour='#79C900', quantity=10, transparency=50),
dict(colour='#CEED00', quantity=20, transparency=50),
dict(colour='#FFCC00', quantity=50, transparency=34),
dict(colour='#FF6600', quantity=100, transparency=77),
dict(colour='#FF0000', quantity=200, transparency=24),
dict(colour='#7A0000', quantity=300, transparency=22)]
"""
# Note imports here to prevent importing on unsupported QGIS versions
# pylint: disable=E0611
# pylint: disable=W0621
# pylint: disable=W0404
from qgis.core import (QgsRasterShader,
QgsColorRampShader,
QgsSingleBandPseudoColorRenderer,
QgsRasterTransparency)
# pylint: enable=E0611
# pylint: enable=W0621
# pylint: enable=W0404
myRampItemList = []
myTransparencyList = []
LOGGER.debug(theClasses)
for myClass in theClasses:
LOGGER.debug('Evaluating class:\n%s\n' % myClass)
if 'quantity' not in myClass:
LOGGER.exception('Class has no quantity attribute')
continue
myMax = myClass['max']
if math.isnan(myMax):
LOGGER.debug('Skipping class - max is nan.')
continue
myMin = myClass['min']
if math.isnan(myMin):
LOGGER.debug('Skipping class - min is nan.')
continue
myColour = QtGui.QColor(myClass['colour'])
myLabel = QtCore.QString()
if 'label' in myClass:
myLabel = QtCore.QString(myClass['label'])
myRampItem = QgsColorRampShader.ColorRampItem(myMax, myColour, myLabel)
myRampItemList.append(myRampItem)
# Create opacity entries for this range
myTransparencyPercent = 0
if 'transparency' in myClass:
myTransparencyPercent = int(myClass['transparency'])
if myTransparencyPercent > 0:
# Check if range extrema are integers so we know if we can
# use them to calculate a value range
myPixel = QgsRasterTransparency.TransparentSingleValuePixel()
myPixel.min = myMin
# We want it just a little bit smaller than max
# so that ranges are discrete
myPixel.max = myMax
myPixel.percentTransparent = myTransparencyPercent
myTransparencyList.append(myPixel)
myBand = 1 # gdal counts bands from base 1
LOGGER.debug('Setting colour ramp list')
myRasterShader = QgsRasterShader()
myColorRampShader = QgsColorRampShader()
myColorRampShader.setColorRampType(QgsColorRampShader.INTERPOLATED)
myColorRampShader.setColorRampItemList(myRampItemList)
LOGGER.debug('Setting shader function')
myRasterShader.setRasterShaderFunction(myColorRampShader)
LOGGER.debug('Setting up renderer')
myRenderer = QgsSingleBandPseudoColorRenderer(
theQgsRasterLayer.dataProvider(),
myBand,
myRasterShader)
LOGGER.debug('Assigning renderer to raster layer')
theQgsRasterLayer.setRenderer(myRenderer)
LOGGER.debug('Setting raster transparency list')
myRenderer = theQgsRasterLayer.renderer()
myTransparency = QgsRasterTransparency()
myTransparency.setTransparentSingleValuePixelList(myTransparencyList)
myRenderer.setRasterTransparency(myTransparency)
# For interest you can also view the list like this:
#pix = t.transparentSingleValuePixelList()
#for px in pix:
# print 'Min: %s Max %s Percent %s' % (
# px.min, px.max, px.percentTransparent)
LOGGER.debug('Saving style as default')
theQgsRasterLayer.saveDefaultStyle()
LOGGER.debug('Setting raster style done!')
return myRampItemList, myTransparencyList
def tr(theText):
"""We define a tr() alias here since the utilities implementation below
is not a class and does not inherit from QObject.
.. note:: see http://tinyurl.com/pyqt-differences
Args:
theText - string to be translated
Returns:
Translated version of the given string if available, otherwise
the original string.
"""
return QCoreApplication.translate('@default', theText)
def getExceptionWithStacktrace(theException, theHtml=False, theContext=None):
"""Convert exception into a string containing a stack trace.
.. note: OS File path separators will be replaced with <wbr> which is a
'soft wrap' (when theHtml=True) that will ensure that long paths do not
force the web frame to be very wide.
Args:
* theException: Exception object.
* theHtml: Optional flag if output is to be wrapped as theHtml.
* theContext: Optional theContext message.
Returns:
Exception: with stack trace info suitable for display.
"""
myTraceback = ''.join(traceback.format_tb(sys.exc_info()[2]))
if not theHtml:
if str(theException) is None or str(theException) == '':
myErrorMessage = (theException.__class__.__name__ + ' : ' +
tr('No details provided'))
else:
myErrorMessage = (theException.__class__.__name__ + ' : ' +
str(theException))
return myErrorMessage + "\n" + myTraceback
else:
if str(theException) is None or str(theException) == '':
myErrorMessage = ('<b>' + theException.__class__.__name__ +
'</b> : ' + tr('No details provided'))
else:
myWrappedMessage = str(theException).replace(os.sep,
'<wbr>' + os.sep)
# If the message contained some html above has a side effect of
# turning </foo> into <<wbr>/foo> and <hr /> into <hr <wbr>/>
# so we need to revert that using the next two lines.
myWrappedMessage = myWrappedMessage.replace('<<wbr>' + os.sep,
'<' + os.sep)
myWrappedMessage = myWrappedMessage.replace('<wbr>' + os.sep + '>',
os.sep + '>')
myErrorMessage = ('<b>' + theException.__class__.__name__ +
'</b> : ' + myWrappedMessage)
myTraceback = ('<pre id="traceback" class="prettyprint"'
' style="display: none;">\n' + myTraceback + '</pre>')
# Wrap string in theHtml
s = '<table class="condensed">'
if theContext is not None and theContext != '':
s += ('<tr><th class="warning button-cell">'
+ tr('Error:') + '</th></tr>\n'
'<tr><td>' + theContext + '</td></tr>\n')
# now the string from the error itself
s += ('<tr><th class="problem button-cell">'
+ tr('Problem:') + '</th></tr>\n'
'<tr><td>' + myErrorMessage + '</td></tr>\n')
# now the traceback heading
s += ('<tr><th class="info button-cell" style="cursor:pointer;"'
' onclick="$(\'#traceback\').toggle();">'
+ tr('Click for Diagnostic Information:') + '</th></tr>\n'
'<tr><td>' + myTraceback + '</td></tr>\n')
s += '</table>'
return s
def getWGS84resolution(theLayer):
"""Return resolution of raster layer in EPSG:4326
Input
theLayer: Raster layer
Output
resolution.
If input layer is already in EPSG:4326, simply return the resolution
If not, work it out based on EPSG:4326 representations of its extent
"""
msg = tr('Input layer to getWGS84resolution must be a raster layer. '
'I got: %s' % str(theLayer.type())[1:-1])
if not theLayer.type() == QgsMapLayer.RasterLayer:
raise RuntimeError(msg)
if theLayer.crs().authid() == 'EPSG:4326':
# If it is already in EPSG:4326, simply use the native resolution
myCellSize = theLayer.rasterUnitsPerPixel()
else:
# Otherwise, work it out based on EPSG:4326 representations of
# its extent
# Reproject extent to EPSG:4326
myGeoCrs = QgsCoordinateReferenceSystem()
myGeoCrs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
myXForm = QgsCoordinateTransform(theLayer.crs(), myGeoCrs)
myExtent = theLayer.extent()
myProjectedExtent = myXForm.transformBoundingBox(myExtent)
# Estimate cellsize
myColumns = theLayer.width()
myGeoWidth = abs(myProjectedExtent.xMaximum() -
myProjectedExtent.xMinimum())
myCellSize = myGeoWidth / myColumns
return myCellSize
def htmlHeader():
"""Get a standard html header for wrapping content in."""
myFile = QtCore.QFile(':/plugins/inasafe/header.html')
if not myFile.open(QtCore.QIODevice.ReadOnly):
return '----'
myStream = QtCore.QTextStream(myFile)
myHeader = myStream.readAll()
myFile.close()
return myHeader
def htmlFooter():
"""Get a standard html footer for wrapping content in."""
myFile = QtCore.QFile(':/plugins/inasafe/footer.html')
if not myFile.open(QtCore.QIODevice.ReadOnly):
return '----'
myStream = QtCore.QTextStream(myFile)
myFooter = myStream.readAll()
myFile.close()
return myFooter
def qgisVersion():
"""Get the version of QGIS
Args:
None
Returns:
QGIS Version where 10700 represents QGIS 1.7 etc.
Raises:
None
"""
myVersion = None
try:
myVersion = unicode(QGis.QGIS_VERSION_INT)
except AttributeError:
myVersion = unicode(QGis.qgisVersion)[0]
myVersion = int(myVersion)
return myVersion
# TODO: move this to its own file? TS
class QgsLogHandler(logging.Handler):
"""A logging handler that will log messages to the QGIS logging console."""
def __init__(self, level=logging.NOTSET):
logging.Handler.__init__(self)
def emit(self, theRecord):
"""Try to log the message to QGIS if available, otherwise do nothing.
Args:
theRecord: logging record containing whatever info needs to be
logged.
Returns:
None
Raises:
None
"""
try:
#available from qgis 1.8
from qgis.core import QgsMessageLog
# Check logging.LogRecord properties for lots of other goodies
# like line number etc. you can get from the log message.
QgsMessageLog.logMessage(theRecord.getMessage(), 'InaSAFE', 0)
except (MethodUnavailableError, ImportError):
pass
def addLoggingHanderOnce(theLogger, theHandler):
"""A helper to add a handler to a logger, ensuring there are no duplicates.
Args:
* theLogger: logging.logger instance
* theHandler: logging.Handler instance to be added. It will not be
added if an instance of that Handler subclass already exists.
Returns:
bool: True if the logging handler was added
Raises:
None
"""
myClassName = theHandler.__class__.__name__
for myHandler in theLogger.handlers:
if myHandler.__class__.__name__ == myClassName:
return False
theLogger.addHandler(theHandler)
return True
def setupLogger(theLogFile=None, theSentryUrl=None):
"""Run once when the module is loaded and enable logging
Args:
* theLogFile: str - optional full path to a file to write logs to.
* theSentryUrl: str - optional url to sentry api for remote logging.
Defaults to http://c64a83978732474ea751d432ab943a6b
:d9d8e08786174227b9dcd8a4c3f6e9da@sentry.linfiniti.com/5
which is the sentry project for InaSAFE desktop.
Returns: None
Raises: None
Borrowed heavily from this:
http://docs.python.org/howto/logging-cookbook.html
Use this to first initialise the logger (see safe/__init__.py)::
from safe_qgis import utilities
utilities.setupLogger()
You would typically only need to do the above once ever as the
safe module is initialised early and will set up the logger
globally so it is available to all packages / subpackages as
shown below.
In a module that wants to do logging then use this example as
a guide to get the initialised logger instance::
# The LOGGER is intialised in utilities.py by init
import logging
LOGGER = logging.getLogger('InaSAFE')
Now to log a message do::
LOGGER.debug('Some debug message')
.. note:: The file logs are written to the inasafe user tmp dir e.g.:
/tmp/inasafe/23-08-2012/timlinux/logs/inasafe.log
"""
myLogger = logging.getLogger('InaSAFE')
myLogger.setLevel(logging.DEBUG)
myDefaultHanderLevel = logging.DEBUG
# create formatter that will be added to the handlers
myFormatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# create syslog handler which logs even debug messages
# (ariel): Make this log to /var/log/safe.log instead of
# /var/log/syslog
# (Tim) Ole and I discussed this - we prefer to log into the
# user's temporary working directory.
myTempDir = temp_dir('logs')
myFilename = os.path.join(myTempDir, 'inasafe.log')
if theLogFile is None:
myFileHandler = logging.FileHandler(myFilename)
else:
myFileHandler = logging.FileHandler(theLogFile)
myFileHandler.setLevel(myDefaultHanderLevel)
# create console handler with a higher log level
myConsoleHandler = logging.StreamHandler()
myConsoleHandler.setLevel(logging.INFO)
myQGISHandler = QgsLogHandler()
# TODO: User opt in before we enable email based logging.
# Email handler for errors
#myEmailServer = 'localhost'
#myEmailServerPort = 25
#mySenderAddress = 'logs@inasafe.org'
#myRecipientAddresses = ['tim@linfiniti.com']
#mySubject = 'Error'
#myEmailHandler = logging.handlers.SMTPHandler(
# (myEmailServer, myEmailServerPort),
# mySenderAddress,
# myRecipientAddresses,
# mySubject)
#myEmailHandler.setLevel(logging.ERROR)
# Sentry handler - this is optional hence the localised import
# It will only log if pip install raven. If raven is available
# logging messages will be sent to http://sentry.linfiniti.com
# We will log exceptions only there. You need to either:
# * Set the env var 'INASAFE_SENTRY=1' (value can be anything), or
# * Enable the 'help improve InaSAFE by submitting errors to a remote
# server' option in the InaSAFE options dialog
# before this will be enabled.
mySettings = QtCore.QSettings()
myFlag = mySettings.value('inasafe/useSentry', False).toBool()
if 'INASAFE_SENTRY' in os.environ or myFlag:
if theSentryUrl is None:
myClient = Client(
'http://c64a83978732474ea751d432ab943a6b'
':d9d8e08786174227b9dcd8a4c3f6e9da@sentry.linfiniti.com/5')
else:
myClient = Client(theSentryUrl)
mySentryHandler = SentryHandler(myClient)
mySentryHandler.setFormatter(myFormatter)
mySentryHandler.setLevel(logging.ERROR)
if addLoggingHanderOnce(myLogger, mySentryHandler):
myLogger.debug('Sentry logging enabled')
else:
myLogger.debug('Sentry logging disabled')
#Set formatters
myFileHandler.setFormatter(myFormatter)
myConsoleHandler.setFormatter(myFormatter)
#myEmailHandler.setFormatter(myFormatter)
myQGISHandler.setFormatter(myFormatter)
# add the handlers to the logger
addLoggingHanderOnce(myLogger, myFileHandler)
addLoggingHanderOnce(myLogger, myConsoleHandler)
#addLoggingHanderOnce(myLogger, myEmailHandler)
addLoggingHanderOnce(myLogger, myQGISHandler)
def getLayerAttributeNames(theLayer, theAllowedTypes, theCurrentKeyword=None):
"""iterates over self.layer and returns all the attribute names of
attributes that have int or string as field type and the position
of the theCurrentKeyword in the attribute names list
Args:
* theAllowedTypes: list(Qvariant) - a list of QVariants types that are
acceptable for the attribute.
e.g.: [QtCore.QVariant.Int, QtCore.QVariant.String]
* theCurrentKeyword - the currently stored keyword for the attribute
Returns:
* all the attribute names of attributes that have int or string as
field type
* the position of the theCurrentKeyword in the attribute names list,
this is None if theCurrentKeyword is not in the list of attributes
Raises:
no exceptions explicitly raised
"""
if theLayer.type() == QgsMapLayer.VectorLayer:
myProvider = theLayer.dataProvider()
myProvider = myProvider.fields()
myFields = []
mySelectedIndex = None
i = 0
for f in myProvider:
# show only int or string myFields to be chosen as aggregation
# attribute other possible would be float
if myProvider[f].type() in theAllowedTypes:
myCurrentFieldName = myProvider[f].name()
myFields.append(myCurrentFieldName)
if theCurrentKeyword == myCurrentFieldName:
mySelectedIndex = i
i += 1
return myFields, mySelectedIndex
else:
return None, None
def getDefaults(theDefault=None):
"""returns a dictionary of defaults values to be used
it takes the DEFAULTS from safe and modifies them according to qgis
QSettings
Args:
* theDefault: a key of the defaults dictionary
Returns:
* A dictionary of defaults values to be used
* or the default value if a key is passed
* or None if the requested default value is not valid
Raises:
no exceptions explicitly raised
"""
mySettings = QtCore.QSettings()
myDefaults = DEFAULTS
myDefaults['FEM_RATIO'] = mySettings.value(
'inasafe/defaultFemaleRatio',
DEFAULTS['FEM_RATIO']).toDouble()[0]
if theDefault is None:
return myDefaults
elif theDefault in myDefaults:
return myDefaults[theDefault]
else:
return None
def copyInMemory(vLayer, copyName=''):
"""Return a memory copy of a layer
Input
origLayer: layer
copyName: the name of the copy
Output
memory copy of a layer
"""
if copyName == '':
copyName = vLayer.name() + ' TMP'
if vLayer.type() == QgsMapLayer.VectorLayer:
vType = vLayer.geometryType()
if vType == QGis.Point:
typeStr = 'Point'
elif vType == QGis.Line:
typeStr = 'Line'
elif vType == QGis.Polygon:
typeStr = 'Polygon'
else:
raise MemoryLayerCreationError('Layer is neither Point nor '
'Line nor Polygon')
else:
raise MemoryLayerCreationError('Layer is not a VectorLayer')
crs = vLayer.crs().authid().toLower()
myUUID = str(uuid.uuid4())
uri = '%s?crs=%s&index=yes&uuid=%s' % (typeStr, crs, myUUID)
memLayer = QgsVectorLayer(uri, copyName, 'memory')
memProvider = memLayer.dataProvider()
vProvider = vLayer.dataProvider()
vAttrs = vProvider.attributeIndexes()
vFields = vProvider.fields()
fields = []
for i in vFields:
fields.append(vFields[i])
memProvider.addAttributes(fields)
vProvider.select(vAttrs)
ft = QgsFeature()
while vProvider.nextFeature(ft):
memProvider.addFeatures([ft])
if qgisVersion() <= 10800:
# Next two lines a workaround for a QGIS bug (lte 1.8)
# preventing mem layer attributes being saved to shp.
memLayer.startEditing()
memLayer.commitChanges()
return memLayer
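# Illustrative usage (hypothetical names): work on a scratch copy so that the
# layer on disk is left untouched, e.g.
#   myScratchLayer = copyInMemory(myVectorLayer, 'analysis scratch')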
def mmToPoints(theMM, theDpi):
"""Convert measurement in points to one in mm.
Args:
* theMM: int - distance in millimeters
* theDpi: int - dots per inch in the print / display medium
Returns:
theMM converted to points
Raises:
Any exceptions raised by the InaSAFE library will be propagated.
"""
myInchAsMM = 25.4
myPoints = (theMM * theDpi) / myInchAsMM
return myPoints
def pointsToMM(thePoints, theDpi):
"""Convert measurement in points to one in mm.
Args:
* thePoints: int - number of points in display / print medium
* theDpi: int - dots per inch in the print / display medium
Returns:
mm converted value
Raises:
Any exceptions raised by the InaSAFE library will be propagated.
"""
myInchAsMM = 25.4
myMM = (float(thePoints) / theDpi) * myInchAsMM
return myMM
def dpiToMeters(theDpi):
"""Convert dots per inch (dpi) to dots perMeters.
Args:
theDpi: int - dots per inch in the print / display medium
Returns:
int - dpm converted value
Raises:
Any exceptions raised by the InaSAFE library will be propagated.
"""
myInchAsMM = 25.4
myInchesPerM = 1000.0 / myInchAsMM
myDotsPerM = myInchesPerM * theDpi
return myDotsPerM
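# Quick sanity checks for the three conversion helpers above (illustrative):
#   mmToPoints(25.4, 300) == 300.0   # one inch at 300 dpi is 300 points
#   pointsToMM(300, 300) == 25.4
#   dpiToMeters(300) ~= 11811.02     # about 11811 dots per metre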
def setupPrinter(theFilename,
theResolution=300,
thePageHeight=297,
thePageWidth=210):
"""Create a QPrinter instance defaulted to print to an A4 portrait pdf
Args:
theFilename - filename for pdf generated using this printer
Returns:
None
Raises:
None
"""
#
# Create a printer device (we are 'printing' to a pdf
#
LOGGER.debug('InaSAFE Map setupPrinter called')
myPrinter = QtGui.QPrinter()
myPrinter.setOutputFormat(QtGui.QPrinter.PdfFormat)
myPrinter.setOutputFileName(theFilename)
myPrinter.setPaperSize(QtCore.QSizeF(thePageWidth, thePageHeight),
QtGui.QPrinter.Millimeter)
myPrinter.setFullPage(True)
myPrinter.setColorMode(QtGui.QPrinter.Color)
myPrinter.setResolution(theResolution)
return myPrinter
def humaniseSeconds(theSeconds):
"""Utility function to humanise seconds value into e.g. 10 seconds ago.
The function will try to make a nice phrase of the seconds count
provided.
.. note:: Currently theSeconds that amount to days are not supported.
Args:
theSeconds: int - mandatory seconds value e.g. 1100
Returns:
str: A humanised version of the seconds count.
Raises:
None
"""
myDays = theSeconds / (3600 * 24)
myDayModulus = theSeconds % (3600 * 24)
myHours = myDayModulus / 3600
myHourModulus = myDayModulus % 3600
myMinutes = myHourModulus / 60
if theSeconds < 60:
return tr('%i seconds' % theSeconds)
if theSeconds < 120:
return tr('a minute')
if theSeconds < 3600:
return tr('%i minutes' % myMinutes)
if theSeconds < 7200:
return tr('over an hour')
if theSeconds < 86400:
return tr('%i hours and %i minutes' % (myHours, myMinutes))
else:
# If all else fails...
return tr('%i days, %i hours and %i minutes' % (
myDays, myHours, myMinutes))
def impactLayerAttribution(theKeywords, theInaSAFEFlag=False):
"""Make a little table for attribution of data sources used in impact.
Args:
* theKeywords: dict{} - a keywords dict for an impact layer.
* theInaSAFEFlag: bool - whether to show a little InaSAFE promotional
text in the attribution output. Defaults to False.
Returns:
str: an html snippet containing attribution information for the impact
layer. If no keywords are present or no appropriate keywords are
present, None is returned.
Raises:
None
"""
if theKeywords is None:
return None
myReport = ''
myJoinWords = ' - %s ' % tr('sourced from')
myHazardDetails = tr('Hazard details')
myHazardTitleKeyword = 'hazard_title'
myHazardSourceKeyword = 'hazard_source'
myExposureDetails = tr('Exposure details')
myExposureTitleKeyword = 'exposure_title'
myExposureSourceKeyword = 'exposure_source'
if myHazardTitleKeyword in theKeywords:
# We use safe translation infrastructure for this one (rather than Qt)
myHazardTitle = safeTr(theKeywords[myHazardTitleKeyword])
else:
myHazardTitle = tr('Hazard layer')
if myHazardSourceKeyword in theKeywords:
# We use safe translation infrastructure for this one (rather than Qt)
myHazardSource = safeTr(theKeywords[myHazardSourceKeyword])
else:
myHazardSource = tr('an unknown source')
if myExposureTitleKeyword in theKeywords:
myExposureTitle = theKeywords[myExposureTitleKeyword]
else:
myExposureTitle = tr('Exposure layer')
if myExposureSourceKeyword in theKeywords:
myExposureSource = theKeywords[myExposureSourceKeyword]
else:
myExposureSource = tr('an unknown source')
myReport += ('<table class="table table-striped condensed'
' bordered-table">')
myReport += '<tr><th>%s</th></tr>' % myHazardDetails
myReport += '<tr><td>%s%s %s.</td></tr>' % (
myHazardTitle,
myJoinWords,
myHazardSource)
myReport += '<tr><th>%s</th></tr>' % myExposureDetails
myReport += '<tr><td>%s%s %s.</td></tr>' % (
myExposureTitle,
myJoinWords,
myExposureSource)
if theInaSAFEFlag:
myReport += '<tr><th>%s</th></tr>' % tr('Software notes')
myInaSAFEPhrase = tr('This report was created using InaSAFE '
'version %1. Visit http://inasafe.org to get '
'your free copy of this software!').arg(
get_version())
myInaSAFEPhrase += tr('InaSAFE has been jointly developed by'
' BNPB, AusAid & the World Bank')
myReport += '<tr><td>%s</td></tr>' % myInaSAFEPhrase
myReport += '</table>'
return myReport
def addComboItemInOrder(theCombo, theItemText, theItemData=None):
"""Although QComboBox allows you to set an InsertAlphabetically enum
this only has effect when a user interactively adds combo items to
an editable combo. Thus we have this little function to ensure that
combos are always sorted alphabetically.
Args:
* theCombo - combo box receiving the new item
* theItemText - display text for the combo
* theItemData - optional UserRole data to be associated with
the item
Returns:
None
Raises:
..todo:: Move this to utilities
"""
mySize = theCombo.count()
for myCount in range(0, mySize):
myItemText = str(theCombo.itemText(myCount))
# see if theItemText alphabetically precedes myItemText
if cmp(str(theItemText).lower(), myItemText.lower()) < 0:
theCombo.insertItem(myCount, theItemText, theItemData)
return
#otherwise just add it to the end
theCombo.insertItem(mySize, theItemText, theItemData)
def isLayerPolygonal(theLayer):
"""Tell if a QGIS layer is vector and its geometries are polygons.
Args:
theLayer - the layer to check
Returns:
bool - True if theLayer is a vector layer whose geometries are polygons
Raises:
None
"""
try:
return (theLayer.type() == QgsMapLayer.VectorLayer) and (
theLayer.geometryType() == QGis.Polygon)
except AttributeError:
return False
def isLayerPoint(theLayer):
"""Tell if a QGIS layer is vector and its geometries are polygons.
Args:
the theLayer
Returns:
bool - true if the theLayer contains polygons
Raises:
None
"""
try:
return (theLayer.type() == QgsMapLayer.VectorLayer) and (
theLayer.geometryType() == QGis.Point)
except AttributeError:
return False
|
fivejjs/inasafe
|
safe_qgis/utilities.py
|
Python
|
gpl-3.0
| 44,063
|
[
"VisIt"
] |
972d610f5e884ca3150c8f88e3c0d5beb2d48b9ee7fde826faf5bec4aeb30133
|
#please add any of the toy examples you build with comprehensions here
fish_tuple = ('blowfish', 'clownfish', 'catfish', 'octopus')
# make a list of all fish with a for loop
fish_list = []
for fish in fish_tuple:
if fish != 'octopus':
fish_list.append(fish)
# print(fish_list)
# make a list of all fish using a list comp
fish_list = [fish for fish in fish_tuple if fish != 'octopus']
# print(fish_list)
# nesting conditionals
number_list = [x for x in range(100) if x % 3 == 0 if x % 5 == 0]
print(number_list)
list_nums = [1, 2, 3]
# multiply each nummber in a list by 3
times_three = [(num * 3) for num in list_nums]
print(times_three)
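# another toy example: dict comprehension mapping each fish to its name length
fish_lengths = {fish: len(fish) for fish in fish_tuple}
print(fish_lengths)
# and a nested comprehension that flattens a list of lists
nested = [[1, 2], [3, 4], [5, 6]]
flat = [num for sublist in nested for num in sublist]
print(flat)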
|
developerQuinnZ/this_will_work
|
student-work/ashley_riehl/week_2/day_1/working_with_csvs/comprehensions.py
|
Python
|
mit
| 656
|
[
"Octopus"
] |
13ecc69af5a5e9ce69e31936c6b32da37f0ceba21ca8dd38b5030019ffefba5c
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string, optparse, subprocess
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
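# Illustrative behaviour (hypothetical values): with
#   config = {'TITANIUM_SDK': '/Library/titanium/$(TITANIUM_VERSION)',
#             'TITANIUM_VERSION': '3.1.0'}
# replace_vars(config, '$(TITANIUM_SDK)/iphone') keeps rescanning the result,
# so nested tokens are expanded too, giving '/Library/titanium/3.1.0/iphone'.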
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
warn("Couldn't find documentation file at: %s" % docdir)
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file:html});
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','com.hoyosta.timultipeer.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','ComHoyostaTimultipeerModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def info(msg):
print "[INFO] %s" % msg
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
if not os.path.exists(path): die("missing %s" % path)
f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignoreExt=[]):
if not os.path.exists(dir): return
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] in ignoreExt: continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, '%s/%s'%(basepath,dir), 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def generate_apidoc(apidoc_build_path):
global options
if options.skip_docs:
info("Skipping documentation generation.")
return False
else:
info("Module apidoc generation can be skipped using --skip-docs")
apidoc_path = os.path.join(cwd, "apidoc")
if not os.path.exists(apidoc_path):
warn("Skipping apidoc generation. No apidoc folder found at: %s" % apidoc_path)
return False
if not os.path.exists(apidoc_build_path):
os.makedirs(apidoc_build_path)
ti_root = string.strip(subprocess.check_output(["echo $TI_ROOT"], shell=True))
if not len(ti_root) > 0:
warn("Not generating documentation from the apidoc folder. The titanium_mobile repo could not be found.")
warn("Set the TI_ROOT environment variable to the parent folder where the titanium_mobile repo resides (eg.'export TI_ROOT=/Path').")
return False
docgen = os.path.join(ti_root, "titanium_mobile", "apidoc", "docgen.py")
if not os.path.exists(docgen):
warn("Not generating documentation from the apidoc folder. Couldn't find docgen.py at: %s" % docgen)
return False
info("Generating documentation from the apidoc folder.")
rc = os.system("\"%s\" --format=jsca,modulehtml --css=styles.css -o \"%s\" -e \"%s\"" % (docgen, apidoc_build_path, apidoc_path))
if rc != 0:
die("docgen failed")
return True
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
apidoc_build_path = os.path.join(cwd, "build", "apidoc")
if generate_apidoc(apidoc_build_path):
for file in os.listdir(apidoc_build_path):
if file in ignoreFiles or os.path.isdir(os.path.join(apidoc_build_path, file)):
continue
zf.write(os.path.join(apidoc_build_path, file), '%s/documentation/apidoc/%s' % (modulepath, file))
zip_dir(zf,'assets',modulepath,['.pyc','.js'])
zip_dir(zf,'example',modulepath,['.pyc'])
zip_dir(zf,'platform',modulepath,['.pyc','.js'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
global options
parser = optparse.OptionParser()
parser.add_option("-s", "--skip-docs",
dest="skip_docs",
action="store_true",
help="Will skip building documentation in apidoc folder",
default=False)
(options, args) = parser.parse_args()
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
|
hoyo/TiMultipeer
|
build.py
|
Python
|
mit
| 8,785
|
[
"VisIt"
] |
e7b2168aeeefa91f98d396e52529e8809ec3d61c1cd25d5eee5e9c12bca67430
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014 windpro
Author : windpro
E-mail : windprog@gmail.com
Date : 15/1/28
Desc :
"""
import os
import geventwebsocket
from geventwebsocket.server import WebSocketServer
PORT = 8000
def echo_app(environ, start_response):
websocket = environ.get("wsgi.websocket")
if websocket is None:
return http_handler(environ, start_response)
try:
print "new client"
while True:
message = websocket.receive()
if message is not None:
print "receive len:%s" % len(message)
else:
print "None message"
websocket.send(message)
websocket.close()
except geventwebsocket.WebSocketError, ex:
print "{0}: {1}".format(ex.__class__.__name__, ex)
def http_handler(environ, start_response):
if environ["PATH_INFO"].strip("/") == "version":
start_response("200 OK", [])
return [agent]
else:
start_response("400 Bad Request", [])
return ["WebSocket connection is expected here."]
path = os.path.dirname(geventwebsocket.__file__)
agent = "gevent-websocket/%s" % (geventwebsocket.get_version())
def visit():
import websocket
ws = websocket.create_connection("ws://localhost:%s/" % PORT)
ws.send(" "*1024*1024*16)
print ws.recv()
print "try Running %s from %s" % (agent, path)
try:
WebSocketServer(("", PORT), echo_app, debug=False).serve_forever()
except:
visit()
|
windprog/requestspool
|
tests/websocketservertest.py
|
Python
|
mit
| 1,522
|
[
"VisIt"
] |
de8cb6bee7184742613d0a512ca91201d4bbd09444aca5b2d1d4fe152315561a
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for the gdal raster driver tests.
Provides tools to simplify testing a driver, which drivers are
available, and where to find test files.
Rewrite of GDALTest class:
http://trac.osgeo.org/gdal/browser/trunk/autotest/pymod/gdaltest.py#L284
"""
import contextlib
import json
from optparse import OptionParser
import os
import unittest
from osgeo import gdal
from osgeo import osr
import gflags as flags
import logging
from autotest2.gcore import gcore_util
FLAGS = flags.FLAGS
drivers = [gdal.GetDriver(i).ShortName.lower()
for i in range(gdal.GetDriverCount())]
AAIGRID_DRIVER = 'aaigrid'
ACE2_DRIVER = 'ace2'
ADRG_DRIVER = 'adrg'
AIG_DRIVER = 'aig'
AIRSAR_DRIVER = 'airsar'
ARG_DRIVER = 'arg'
BAG_DRIVER = 'bag'
BIGGIF_DRIVER = 'biggif'
BLX_DRIVER = 'blx'
BMP_DRIVER = 'bmp'
BSB_DRIVER = 'bsb'
BT_DRIVER = 'bt'
CEOS_DRIVER = 'ceos'
COASP_DRIVER = 'coasp'
COSAR_DRIVER = 'cosar'
CPG_DRIVER = 'cpg'
CTABLE2_DRIVER = 'ctable2'
CTG_DRIVER = 'ctg'
DIMAP_DRIVER = 'dimap'
DIPEX_DRIVER = 'dipex'
DOQ1_DRIVER = 'doq1'
DOQ2_DRIVER = 'doq2'
DTED_DRIVER = 'dted'
E00GRID_DRIVER = 'e00grid'
ECRGTOC_DRIVER = 'ecrgtoc'
ECW_DRIVER = 'ecw'
EHDR_DRIVER = 'ehdr'
EIR_DRIVER = 'eir'
ELAS_DRIVER = 'elas'
ENVI_DRIVER = 'envi'
ERS_DRIVER = 'ers'
ESAT_DRIVER = 'esat'
FAST_DRIVER = 'fast'
FIT_DRIVER = 'fit'
FITS_DRIVER = 'fits'
FUJIBAS_DRIVER = 'fujibas'
GENBIN_DRIVER = 'genbin'
GFF_DRIVER = 'gff'
GIF_DRIVER = 'gif'
GMT_DRIVER = 'gmt'
GRASS_DRIVER = 'grass'
GRASSASCIIGRID_DRIVER = 'grassasciigrid'
GRIB_DRIVER = 'grib'
GS7BG_DRIVER = 'gs7bg'
GSAG_DRIVER = 'gsag'
GSBG_DRIVER = 'gsbg'
GSC_DRIVER = 'gsc'
GTIFF_DRIVER = 'gtiff'
GTX_DRIVER = 'gtx'
GXF_DRIVER = 'gxf'
HDF4_DRIVER = 'hdf4'
HDF5_DRIVER = 'hdf5'
HDF4IMAGE_DRIVER = 'hdf4image'
HDF5IMAGE_DRIVER = 'hdf5image'
HF2_DRIVER = 'hf2'
HFA_DRIVER = 'hfa'
HTTP_DRIVER = 'http'
IDA_DRIVER = 'ida'
ILWIS_DRIVER = 'ilwis'
INGR_DRIVER = 'ingr'
IRIS_DRIVER = 'iris'
ISIS2_DRIVER = 'isis2'
ISIS3_DRIVER = 'isis3'
JAXAPALSAR_DRIVER = 'jaxapalsar'
JDEM_DRIVER = 'jdem'
JP2ECW_DRIVER = 'jp2ecw'
JP2KAK_DRIVER = 'jp2kak'
JPEG2000_DRIVER = 'jpeg2000'
JP2MRSID = 'jp2mrsid'
JP2OPENJPEG = 'jp2openjpeg'
JPEG_DRIVER = 'jpeg'
JPIPKAK_DRIVER = 'jpipkak'
KMLSUPEROVERLAY_DRIVER = 'kmlsuperoverlay'
KRO_DRIVER = 'kro'
L1B_DRIVER = 'l1b'
LAN_DRIVER = 'lan'
LCP_DRIVER = 'lcp'
LEVELLER_DRIVER = 'leveller'
LOSLAS_DRIVER = 'loslas'
MAP_DRIVER = 'map'
MBTILES_DRIVER = 'mbtiles'
MEM_DRIVER = 'mem'
MFF_DRIVER = 'mff'
MFF2_DRIVER = 'mff2'
MG4LIDAR_DRIVER = 'mg4lidar'
MRSID_DRIVER = 'mrsid'
MSGN_DRIVER = 'msgn'
NDF_DRIVER = 'ndf'
NETCDF_DRIVER = 'netcdf'
NGSGEOID_DRIVER = 'ngsgeoid'
NITF_DRIVER = 'nitf'
NTV2_DRIVER = 'ntv2'
NWT_GRC_DRIVER = 'nwt_grc'
NWT_GRD_DRIVER = 'nwt_grd'
OZI_DRIVER = 'ozi'
PAUX_DRIVER = 'paux'
PCIDSK_DRIVER = 'pcidsk'
PCRASTER_DRIVER = 'pcraster'
PDF_DRIVER = 'pdf'
PDS_DRIVER = 'pds'
PNG_DRIVER = 'png'
PNM_DRIVER = 'pnm'
POSTGISRASTER_DRIVER = 'postgisraster'
R_DRIVER = 'r'
RASTERLITE_DRIVER = 'rasterlite'
RIK_DRIVER = 'rik'
RMF_DRIVER = 'rmf'
RPFTOC_DRIVER = 'rpftoc'
RS2_DRIVER = 'rs2'
RST_DRIVER = 'rst'
SAGA_DRIVER = 'saga'
SAR_CEOS_DRIVER = 'sar_ceos'
SDTS_DRIVER = 'sdts'
SGI_DRIVER = 'sgi'
SNODAS_DRIVER = 'snodas'
SRP_DRIVER = 'srp'
SRTMHGT_DRIVER = 'srtmhgt'
TERRAGEN_DRIVER = 'terragen'
TIL_DRIVER = 'til'
TSX_DRIVER = 'tsx'
USGSDEM_DRIVER = 'usgsdem'
VRT_DRIVER = 'vrt'
WCS_DRIVER = 'wcs'
WEBP_DRIVER = 'webp'
WMS_DRIVER = 'wms'
XPM_DRIVER = 'xpm'
XYZ_DRIVER = 'xyz'
ZMAP_DRIVER = 'zmap'
# A string copy of byte.tif so that tests do not need to depend on all of the
# tiff data to get a simple tiff for basic tests.
TIFF_BYTE_FILE = (
'\x49\x49\x2a\x00\x98\x01\x00\x00\x6b\x7b\x84\x73\x84\x84\x8c\x84\x84'
'\x84\x6b\x84\x6b\x84\x84\x6b\x7b\x73\x9c\x94\x73\x84\x6b\x7b\x94\x73'
'\xa5\x73\x8c\x6b\x7b\x7b\x63\x84\x7b\x84\x84\x84\x63\x9c\x73\x84\x8c'
'\x84\x7b\x73\x8c\x6b\x8c\x73\x84\x7b\x6b\x84\x84\x73\x73\x6b\x73\x6b'
'\x94\x84\x7b\x7b\x73\x84\x84\x7b\x73\x7b\x73\x7b\x6b\x73\x94\x6b\x73'
'\x8c\x73\x84\x84\x9c\x84\x8c\x84\x84\x73\x73\x73\x7b\x94\x7b\xa5\x7b'
'\x84\x6b\x6b\x84\x9c\x7b\xbd\xad\xad\x94\x94\x73\x94\x7b\x6b\x84\x73'
'\x84\x9c\x63\x7b\x73\x84\x84\xce\x6b\xc5\xad\x94\x8c\x8c\x84\x63\x84'
'\x7b\x73\x8c\x84\x84\x63\x84\x7b\x84\xad\x7b\x73\x94\x7b\x94\x73\x94'
'\x7b\x8c\x7b\x6b\x73\x84\x73\x6b\x73\x63\x7b\x63\xb5\x63\x6b\x7b\x73'
'\x84\x73\x7b\x84\x73\x84\x84\x7b\x7b\x84\x63\x73\x63\x7b\x84\x73\x73'
'\x6b\x8c\x8c\x63\x8c\x63\x73\x7b\x6b\x84\x6b\x73\x6b\x73\x7b\x84\x7b'
'\x6b\x7b\x84\x84\x84\x84\x84\x7b\x63\x84\x7b\x6b\x94\x63\x73\x7b\x8c'
'\xad\x7b\x6b\x7b\x7b\x7b\x6b\x7b\x7b\x7b\x6b\x8c\x7b\x7b\x73\x73\x5a'
'\x6b\xad\x6b\x6b\x6b\x6b\x63\x84\x7b\x73\xad\x94\x63\x7b\x7b\x6b\x7b'
'\x63\x6b\xbd\xad\x6b\x73\x73\x6b\x63\x8c\x6b\xad\x8c\x94\x84\x84\x6b'
'\x7b\x63\x63\x73\x63\x84\x63\x8c\x73\x94\x7b\x63\x84\x7b\x94\x8c\x8c'
'\x6b\x8c\x5a\x6b\x73\x6b\x5a\x63\x7b\x73\x73\x73\x7b\x7b\x94\x73\x94'
'\x63\x84\xa5\x94\x9c\x7b\x6b\x6b\x6b\x73\x8c\x63\x73\x63\x63\x6b\x73'
'\x84\x73\x5a\x7b\x73\xbd\xad\x8c\x8c\xa5\x73\x84\x5a\x63\x73\x5a\x63'
'\x63\x6b\x63\x84\x63\x6b\x84\x84\x9c\xb5\x8c\xad\x7b\x84\x63\x73\x7b'
'\x4a\x73\x63\x7b\x8c\x9c\x84\xa5\x8c\x8c\x63\xad\xf7\xff\xce\x84\x6b'
'\x8c\x7b\x94\x84\xa5\xa5\x94\x8c\x84\x7b\x6b\x7b\x6b\x7b\xb5\xb5\x9c'
'\x94\x9c\x9c\x9c\xb5\x84\x94\x73\x84\x6b\x6b\x6b\x6b\x6b\x73\x63\x6b'
'\x0f\x00\x00\x01\x03\x00\x01\x00\x00\x00\x14\x00\x00\x00\x01\x01\x03'
'\x00\x01\x00\x00\x00\x14\x00\x00\x00\x02\x01\x03\x00\x01\x00\x00\x00'
'\x08\x00\x00\x00\x03\x01\x03\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06'
'\x01\x03\x00\x01\x00\x00\x00\x01\x00\x00\x00\x11\x01\x04\x00\x01\x00'
'\x00\x00\x08\x00\x00\x00\x15\x01\x03\x00\x01\x00\x00\x00\x01\x00\x00'
'\x00\x16\x01\x03\x00\x01\x00\x00\x00\x14\x00\x00\x00\x17\x01\x04\x00'
'\x01\x00\x00\x00\x90\x01\x00\x00\x1c\x01\x03\x00\x01\x00\x00\x00\x01'
'\x00\x00\x00\x53\x01\x03\x00\x01\x00\x00\x00\x01\x00\x00\x00\x0e\x83'
'\x0c\x00\x03\x00\x00\x00\x52\x02\x00\x00\x82\x84\x0c\x00\x06\x00\x00'
'\x00\x6a\x02\x00\x00\xaf\x87\x03\x00\x18\x00\x00\x00\x9a\x02\x00\x00'
'\xb1\x87\x02\x00\x16\x00\x00\x00\xca\x02\x00\x00\x00\x00\x00\x00\x00'
'\x00\x00\x00\x00\x00\x4e\x40\x00\x00\x00\x00\x00\x00\x4e\x40\x00\x00'
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
'\x40\xe6\x1a\x41\x00\x00\x00\x00\xcc\x9e\x4c\x41\x00\x00\x00\x00\x00'
'\x00\x00\x00\x01\x00\x01\x00\x00\x00\x05\x00\x00\x04\x00\x00\x01\x00'
'\x01\x00\x01\x04\x00\x00\x01\x00\x01\x00\x02\x04\xb1\x87\x15\x00\x00'
'\x00\x00\x0c\x00\x00\x01\x00\x57\x68\x04\x0c\x00\x00\x01\x00\x29\x23'
'\x4e\x41\x44\x32\x37\x20\x2f\x20\x55\x54\x4d\x20\x7a\x6f\x6e\x65\x20'
'\x31\x31\x4e\x7c\x00')
def SkipIfDriverMissing(driver_name):
"""Decorator that only runs a test if a required driver is found.
Args:
driver_name: Lower case short name of a driver. e.g. 'dted'.
Returns:
A pass through function if the test should be run or the unittest skip
function if the test or TestCase should not be run.
"""
def _IdReturn(obj):
return obj
debug = gdal.GetConfigOption('CPL_DEBUG')
if driver_name not in drivers:
if debug:
logging.info('Debug: Skipping test. Driver not found: %s', driver_name)
return unittest.case.skip('Skipping "%s" driver dependent test.' %
driver_name)
if debug:
logging.info('Debug: Running test. Found driver: %s', driver_name)
return _IdReturn
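# Illustrative-only sketch (not from the original module) of how the decorator
# above is typically applied; the class and method names are hypothetical, and
# DriverTestCase is defined further below in this file.
#
#   @SkipIfDriverMissing(WEBP_DRIVER)
#   class WebpTest(DriverTestCase):
#
#     @SkipIfDriverMissing(VRT_DRIVER)
#     def testSomethingThatAlsoNeedsVrt(self):
#       ...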
def GetTestFilePath(filename):
return os.path.join(
FLAGS.test_srcdir,
'autotest2/gdrivers/testdata',
os.path.split(os.path.abspath(__file__))[0],
'testdata',
filename
)
def CreateParser():
parser = OptionParser()
parser.add_option('-t', '--temp-dir', default=os.getcwd(),
help='Where to put temporary files.',
metavar='DIR')
parser.add_option('-p', '--pam-dir', default=None,
help='Where to store the .aux.xml files created '
'by the persistent auxiliary metadata system. '
'Defaults to temp-directory/pam.',
metavar='DIR')
parser.add_option('-v', '--verbose', default=False, action='store_true',
help='Put the unittest run into verbose mode.')
return parser
def Setup(options):
if options.verbose:
logging.basicConfig(level=logging.INFO)
options.temp_dir = os.path.abspath(options.temp_dir)
gdal.SetConfigOption('CPL_TMPDIR', options.temp_dir)
logging.info('CPL_TMPDIR: %s', options.temp_dir)
options.pam_dir = options.pam_dir or os.path.join(options.temp_dir, 'pam')
if not os.path.isdir(options.pam_dir):
os.mkdir(options.pam_dir)
gdal.SetConfigOption('GDAL_PAM_PROXY_DIR', options.pam_dir)
logging.info('GDAL_PAM_PROXY_DIR: %s', options.pam_dir)
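# Hypothetical wiring of CreateParser and Setup in a test runner's main block;
# the actual entry point is not part of this module, so this is only a sketch.
#
#   if __name__ == '__main__':
#     options, _ = CreateParser().parse_args()
#     Setup(options)
#     unittest.main(verbosity=2 if options.verbose else 1)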
class TempFiles(object):
def __init__(self):
self.count = 0
self.tmp_dir = None
def TempFile(self, basename, ext=''):
if not self.tmp_dir:
self.tmp_dir = gdal.GetConfigOption('TMPDIR')
if not self.tmp_dir:
logging.fatal('Do not have a tmp_dir!!!')
filepath = os.path.join(self.tmp_dir,
basename + '%03d' % self.count + ext)
self.count += 1
return filepath
_temp_files = TempFiles()
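# Example of the naming scheme (paths are hypothetical): successive calls
# return unique file names under the configured TMPDIR, e.g.
#   _temp_files.TempFile('byte', '.tif')  -> <TMPDIR>/byte000.tif
#   _temp_files.TempFile('byte', '.tif')  -> <TMPDIR>/byte001.tif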
@contextlib.contextmanager
def ConfigOption(key, value, default=None):
"""Set a gdal config option and when the context closes, try to revert it.
TODO(schwehr): This would be better as part of gcore_util.py.
Args:
key: String naming the config option.
value: String value to set the option to.
default: String value to reset the option to if no starting value.
Yields:
None
"""
original_value = gdal.GetConfigOption(key, default)
gdal.SetConfigOption(key, value)
try:
yield
finally:
gdal.SetConfigOption(key, original_value)
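# Usage sketch for the context manager above; the option name and value are
# illustrative only.
#
#   with ConfigOption('GDAL_CACHEMAX', '64'):
#     pass  # code here sees the temporary setting; the old value is restored on exit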
class DriverTestCase(unittest.TestCase):
"""Checks the basic functioning of a single raster driver.
Assumes that only one driver is registered for the file type.
CheckOpen has a critical side effect that it puts the open data
source in the src attribute. Checks below CheckOpen in this class
assume that self.src is the original open file.
"""
def setUp(self, driver_name, ext):
super(DriverTestCase, self).setUp()
gcore_util.SetupTestEnv()
assert driver_name
self.driver_name = driver_name.lower()
self.driver = gdal.GetDriverByName(driver_name)
assert self.driver
self.ext = ext
# Start with a clean slate.
gdal.ErrorReset()
# Allow details and custom message.
self.longMessage = True
def assertIterAlmostEqual(self, first, second, places=None, msg=None,
delta=None):
msg = msg or ''
self.assertEqual(len(first), len(second), 'lists not same length ' + msg)
for a, b in zip(first, second):
self.assertAlmostEqual(a, b, places=places, msg=msg, delta=delta)
def CheckDriver(self):
self.assertEqual(self.driver_name, self.driver.ShortName.lower())
def CheckOpen(self, filepath, check_driver=True):
"""Open the test file and keep it open as self.src.
Args:
filepath: str, Path to a file to open with GDAL.
check_driver: If True, make sure that the file opened with the
default driver for this test. If it is a str, then check that
the driver used matches the string. If False, then do not
check the driver.
"""
if filepath.startswith(os.path.sep) and not filepath.startswith('/vsi'):
self.assertTrue(os.path.isfile(filepath), 'Does not exist: ' + filepath)
self.src = gdal.Open(filepath, gdal.GA_ReadOnly)
self.assertTrue(self.src, '%s driver unable to open %s' % (self.driver_name,
filepath))
if check_driver:
driver_name = self.src.GetDriver().ShortName.lower()
if isinstance(check_driver, str) or isinstance(check_driver, unicode):
self.assertEqual(check_driver, driver_name)
else:
self.assertEqual(self.driver_name, driver_name)
self.filepath = filepath
def CheckGeoTransform(self, gt_expected, gt_delta=None):
gt = self.src.GetGeoTransform()
if not gt and not gt_expected:
return
self.assertEqual(len(gt_expected), 6)
gt_delta = gt_delta or ((abs(gt_expected[1]) + abs(gt_expected[2])) / 100.0)
for idx in range(6):
self.assertAlmostEqual(gt[idx], gt_expected[idx], delta=gt_delta)
def CheckProjection(self, prj_expected):
prj = self.src.GetProjection()
if not prj and not prj_expected:
return
src_osr = osr.SpatialReference(wkt=prj)
prj2 = osr.SpatialReference()
prj2.SetFromUserInput(prj_expected)
msg = 'Projection mismatch:\nGot:\n%s\nExpected:\n%s' % (prj, prj_expected)
self.assertTrue(src_osr.IsSame(prj2), msg=msg)
def CheckShape(self, width, height, num_bands):
self.assertEqual(width, self.src.RasterXSize)
self.assertEqual(height, self.src.RasterYSize)
self.assertEqual(num_bands, self.src.RasterCount)
def CheckBand(self, band_num, checksum, gdal_type=None, nodata=None,
min_val=None, max_val=None):
band = self.src.GetRasterBand(band_num)
self.assertEqual(band.Checksum(), checksum)
if gdal_type is not None:
self.assertEqual(gdal_type, band.DataType)
if nodata is not None:
self.assertEqual(nodata, band.GetNoDataValue())
if min_val is not None or max_val is not None:
stats = band.GetStatistics(False, True)
if min_val is not None:
self.assertAlmostEqual(min_val, stats[0])
if max_val is not None:
self.assertAlmostEqual(max_val, stats[1])
def CheckBandSubRegion(self, band_num, checksum, xoff, yoff, xsize, ysize):
band = self.src.GetRasterBand(band_num)
self.assertEqual(checksum, band.Checksum(xoff, yoff, xsize, ysize))
# TODO(schwehr): Add assertCreateCopyInterrupt method.
def CheckCreateCopy(self,
check_checksums=True,
check_stats=True,
check_geotransform=True,
check_projection=True,
options=None,
strict=True,
vsimem=False,
remove_result=False,
checksums=None,
stats=None,
metadata=None):
"""Compare a copy to the currently open file.
Args:
check_checksums: Set to False to not check checksums. Or a list of one
checksum per band.
check_stats: Compare band statistics if true. Or a list of one
(min, max) tuple per band.
check_geotransform: Set to False to skip checking the geotransform.
check_projection: Set to False to skip checking the projection.
options: List of options to pass to CreateCopy.
strict: Set to False to have the CreateCopy operation in loose mode.
vsimem: If true, copy to memory.
remove_result: If true, remove the copy when done.
checksums: Optional list of checksums. If left out, the checksums
from the input file will be used.
stats: Optional list of min/max tuples to compare for each band. If
left out, uses the stats from the input file.
metadata: A dictionary of metadata fields to verify.
Returns:
Open gdal raster Dataset.
"""
# TODO(schwehr): Complain if options is a str or unicode.
# TODO(schwehr): Use gdal.GetConfigOption('TMPDIR') if available.
options = options or []
basename = os.path.basename(self.src.GetFileList()[0])
if vsimem:
dst_file = os.path.join('/vsimem/', basename + self.ext)
else:
dst_file = _temp_files.TempFile(basename, self.ext)
dst = self.driver.CreateCopy(dst_file, self.src, strict=strict,
options=options)
self.assertTrue(dst)
self.assertEqual(dst.GetDriver().ShortName.lower(), self.driver_name)
# TODO(schwehr): Pre-close tests.
del dst # Flush the file.
self.dst = gdal.Open(dst_file)
self.assertTrue(self.dst)
self.assertEqual(self.dst.RasterCount, self.src.RasterCount)
for band_num in range(1, self.dst.RasterCount + 1):
src_band = self.src.GetRasterBand(band_num)
dst_band = self.dst.GetRasterBand(band_num)
if check_checksums:
dst_checksum = dst_band.Checksum()
if checksums:
self.assertEqual(dst_checksum, checksums[band_num - 1])
else:
self.assertEqual(dst_checksum, src_band.Checksum())
if check_stats:
dst_stats = dst_band.ComputeRasterMinMax()
if stats:
self.assertEqual(dst_stats, stats[band_num - 1])
else:
self.assertEqual(dst_stats, src_band.ComputeRasterMinMax())
if check_geotransform:
self.CheckGeoTransform(self.dst.GetGeoTransform())
if check_projection:
self.CheckProjection(self.dst.GetProjection())
if metadata:
result_metadata = self.dst.GetMetadata()
for key in metadata:
self.assertEqual(metadata[key], result_metadata[key])
if remove_result:
self.dst = None
self.driver.Delete(dst_file)
return self.dst
def CheckCreateCopySimple(self, data):
"""Try to make a copy from vsimem to the format under test in vsimem.
Args:
data: Contents of the source tif file to write to vsimem.
"""
filepath = '/vsimem/create_copy_simple.tif'
with gcore_util.GdalUnlinkWhenDone(filepath):
dst = gdal.VSIFOpenL(filepath, 'wb')
gdal.VSIFWriteL(data, 1, len(data), dst)
gdal.VSIFCloseL(dst)
self.CheckOpen(filepath, check_driver=GTIFF_DRIVER)
self.CheckCreateCopy(vsimem=True, remove_result=True)
def CheckInfo(self):
"""Use a golden json dump to see if the current read matches.
May need additional work in the future to keep the checks from being brittle.
Must call CheckOpen before using this.
"""
expect = json.load(open(self.filepath + '.json'))
options = gdal.InfoOptions(
format='json', computeMinMax=True, stats=True, computeChecksum=True)
result = gdal.Info(self.src, options=options)
# Save in case of failure.
result_json = json.dumps(result)
basename_json = os.path.basename(self.filepath) + '.json'
# Some drivers include the version number in driverLongName; a difference
# there is okay as long as the driverShortName is the same.
expect.pop('driverLongName')
result.pop('driverLongName')
description_expect = expect.pop('description')
description_result = result.pop('description')
self.assertEqual(os.path.basename(description_result), description_expect)
files_expect = expect.pop('files')
files_result = result.pop('files')
self.assertEqual(len(files_result), len(files_expect),
'%s versus %s' % (files_result, files_expect))
for filepath_result, filepath_expect in zip(files_result, files_expect):
self.assertEqual(os.path.basename(filepath_result), filepath_expect)
extent_expect_field = _GetExtentField(expect)
extent_result_field = _GetExtentField(result)
if not extent_expect_field or extent_expect_field != extent_result_field:
MaybeWriteOutputFile(basename_json, result_json)
self.assertEqual(extent_expect_field, extent_result_field, self.filepath)
extent_expect = expect.pop(extent_expect_field)['coordinates'][0]
extent_result = result.pop(extent_result_field)['coordinates'][0]
self.assertEqual(len(extent_result), len(extent_expect))
for a, b in zip(extent_result, extent_expect):
self.assertAlmostEqual(a[0], b[0], places=2, msg=self.filepath)
bands_expect = expect.pop('bands')
bands_result = result.pop('bands')
if bands_result != bands_expect:
MaybeWriteOutputFile(basename_json, result_json)
self.assertEqual(bands_result, bands_expect, self.filepath)
srs_wkt_expect = expect.pop('coordinateSystem')['wkt']
srs_wkt_result = result.pop('coordinateSystem')['wkt']
if srs_wkt_expect:
srs_expect = osr.SpatialReference(wkt=str(srs_wkt_expect))
srs_result = osr.SpatialReference(wkt=str(srs_wkt_result))
if not srs_expect.IsSame(srs_result):
MaybeWriteOutputFile(basename_json, result_json)
self.assertTrue(srs_expect.IsSame(srs_result), self.filepath)
if result != expect:
MaybeWriteOutputFile(basename_json, result_json)
self.assertEqual(result, expect, self.filepath)
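# Rough sketch of how the pieces above are presumably combined in a concrete
# driver test. The driver, file name, shape and checksum values are made up
# for illustration and are not taken from any real test.
#
#   @SkipIfDriverMissing(XYZ_DRIVER)
#   class XyzTest(DriverTestCase):
#
#     def setUp(self):
#       super(XyzTest, self).setUp(XYZ_DRIVER, ext='.xyz')
#
#     def testOpen(self):
#       self.CheckOpen(GetTestFilePath('sample.xyz'))
#       self.CheckShape(20, 20, 1)
#       self.CheckBand(1, 4672, gdal.GDT_Byte)
#
#     def testCreateCopy(self):
#       self.CheckCreateCopySimple(TIFF_BYTE_FILE)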
def _GetExtentField(json_info):
"""The extent field must be only one of extent or wgs84Extent."""
has_extent = 'extent' in json_info
has_wgs84 = 'wgs84Extent' in json_info
if has_extent and not has_wgs84:
return 'extent'
if not has_extent and has_wgs84:
return 'wgs84Extent'
return None
def MaybeWriteOutputFile(filename, data):
"""Write a file from a test if allowed."""
if 'TEST_UNDECLARED_OUTPUTS_DIR' not in os.environ:
logging.error('Not allowed to write from the test.')
return
output_dir = os.environ['TEST_UNDECLARED_OUTPUTS_DIR']
filepath = os.path.join(output_dir, os.path.basename(filename))
open(filepath, 'w').write(data)
|
schwehr/gdal-autotest2
|
python/gdrivers/gdrivers_util.py
|
Python
|
apache-2.0
| 22015
|
[
"NetCDF"
] |
1f418b487babb242b52342c11f596ddc3b7945b781c6df787f847d6e7dd0105a
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The fsl module provides classes for interfacing with the `FSL
<http://www.fmrib.ox.ac.uk/fsl/index.html>`_ command line tools. This
was written to work with FSL version 4.1.4.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import range, open
import os
import os.path as op
from warnings import warn
import numpy as np
from nibabel import load
from ...utils.filemanip import split_filename
from ..base import (TraitedSpec, File, InputMultiPath,
OutputMultiPath, Undefined, traits,
isdefined)
from .base import FSLCommand, FSLCommandInputSpec
class BETInputSpec(FSLCommandInputSpec):
# We use position args here as list indices - so a negative number
# will put something on the end
in_file = File(exists=True,
desc='input file to skull strip',
argstr='%s', position=0, mandatory=True)
out_file = File(desc='name of output skull stripped image',
argstr='%s', position=1, genfile=True, hash_files=False)
outline = traits.Bool(desc='create surface outline image',
argstr='-o')
mask = traits.Bool(desc='create binary mask image',
argstr='-m')
skull = traits.Bool(desc='create skull image',
argstr='-s')
no_output = traits.Bool(argstr='-n',
desc="Don't generate segmented output")
frac = traits.Float(desc='fractional intensity threshold',
argstr='-f %.2f')
vertical_gradient = traits.Float(
argstr='-g %.2f',
desc='vertical gradient in fractional intensity threshold (-1, 1)')
radius = traits.Int(argstr='-r %d', units='mm',
desc="head radius")
center = traits.List(traits.Int, desc='center of gravity in voxels',
argstr='-c %s', minlen=0, maxlen=3,
units='voxels')
threshold = traits.Bool(
argstr='-t',
desc="apply thresholding to segmented brain image and mask")
mesh = traits.Bool(argstr='-e',
desc="generate a vtk mesh brain surface")
# the remaining 'options' are more like modes (mutually exclusive) that
# FSL actually implements in a shell script wrapper around the bet binary.
# for some combinations of them in specific order a call would not fail,
# but in general using more than one of the following is clearly not
# supported
_xor_inputs = ('functional', 'reduce_bias', 'robust', 'padding',
'remove_eyes', 'surfaces', 't2_guided')
robust = traits.Bool(
desc='robust brain centre estimation (iterates BET several times)',
argstr='-R', xor=_xor_inputs)
padding = traits.Bool(
desc=('improve BET if FOV is very small in Z (by temporarily padding '
'end slices)'),
argstr='-Z', xor=_xor_inputs)
remove_eyes = traits.Bool(
desc='eye & optic nerve cleanup (can be useful in SIENA)',
argstr='-S', xor=_xor_inputs)
surfaces = traits.Bool(
desc=('run bet2 and then betsurf to get additional skull and scalp '
'surfaces (includes registrations)'),
argstr='-A', xor=_xor_inputs)
t2_guided = File(desc='as with creating surfaces, when also feeding in '
'non-brain-extracted T2 (includes registrations)',
argstr='-A2 %s', xor=_xor_inputs)
functional = traits.Bool(argstr='-F', xor=_xor_inputs,
desc="apply to 4D fMRI data")
reduce_bias = traits.Bool(argstr='-B', xor=_xor_inputs,
desc="bias field and neck cleanup")
class BETOutputSpec(TraitedSpec):
out_file = File(
desc="path/name of skullstripped file (if generated)")
mask_file = File(
desc="path/name of binary brain mask (if generated)")
outline_file = File(
desc="path/name of outline file (if generated)")
meshfile = File(
desc="path/name of vtk mesh file (if generated)")
inskull_mask_file = File(
desc="path/name of inskull mask (if generated)")
inskull_mesh_file = File(
desc="path/name of inskull mesh outline (if generated)")
outskull_mask_file = File(
desc="path/name of outskull mask (if generated)")
outskull_mesh_file = File(
desc="path/name of outskull mesh outline (if generated)")
outskin_mask_file = File(
desc="path/name of outskin mask (if generated)")
outskin_mesh_file = File(
desc="path/name of outskin mesh outline (if generated)")
skull_mask_file = File(
desc="path/name of skull mask (if generated)")
class BET(FSLCommand):
"""Use FSL BET command for skull stripping.
For complete details, see the `BET Documentation.
<http://www.fmrib.ox.ac.uk/fsl/bet2/index.html>`_
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> btr = fsl.BET()
>>> btr.inputs.in_file = example_data('structural.nii')
>>> btr.inputs.frac = 0.7
>>> res = btr.run() # doctest: +SKIP
"""
_cmd = 'bet'
input_spec = BETInputSpec
output_spec = BETOutputSpec
def _run_interface(self, runtime):
# The returncode is meaningless in BET. So check the output
# in stderr and if it's set, then update the returncode
# accordingly.
runtime = super(BET, self)._run_interface(runtime)
if runtime.stderr:
self.raise_exception(runtime)
return runtime
def _gen_outfilename(self):
out_file = self.inputs.out_file
if not isdefined(out_file) and isdefined(self.inputs.in_file):
out_file = self._gen_fname(self.inputs.in_file,
suffix='_brain')
return os.path.abspath(out_file)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = self._gen_outfilename()
if ((isdefined(self.inputs.mesh) and self.inputs.mesh) or
(isdefined(self.inputs.surfaces) and self.inputs.surfaces)):
outputs['meshfile'] = self._gen_fname(outputs['out_file'],
suffix='_mesh.vtk',
change_ext=False)
if (isdefined(self.inputs.mask) and self.inputs.mask) or \
(isdefined(self.inputs.reduce_bias) and
self.inputs.reduce_bias):
outputs['mask_file'] = self._gen_fname(outputs['out_file'],
suffix='_mask')
if isdefined(self.inputs.outline) and self.inputs.outline:
outputs['outline_file'] = self._gen_fname(
outputs['out_file'], suffix='_overlay')
if isdefined(self.inputs.surfaces) and self.inputs.surfaces:
outputs['inskull_mask_file'] = self._gen_fname(
outputs['out_file'], suffix='_inskull_mask')
outputs['inskull_mesh_file'] = self._gen_fname(
outputs['out_file'], suffix='_inskull_mesh')
outputs['outskull_mask_file'] = self._gen_fname(
outputs['out_file'], suffix='_outskull_mask')
outputs['outskull_mesh_file'] = self._gen_fname(
outputs['out_file'], suffix='_outskull_mesh')
outputs['outskin_mask_file'] = self._gen_fname(
outputs['out_file'], suffix='_outskin_mask')
outputs['outskin_mesh_file'] = self._gen_fname(
outputs['out_file'], suffix='_outskin_mesh')
outputs['skull_mask_file'] = self._gen_fname(
outputs['out_file'], suffix='_skull_mask')
if isdefined(self.inputs.no_output) and self.inputs.no_output:
outputs['out_file'] = Undefined
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._gen_outfilename()
return None
class FASTInputSpec(FSLCommandInputSpec):
""" Defines inputs (trait classes) for FAST """
in_files = InputMultiPath(File(exists=True), copyfile=False,
desc='image, or multi-channel set of images, '
'to be segmented',
argstr='%s', position=-1, mandatory=True)
out_basename = File(desc='base name of output files',
argstr='-o %s')
# ^^ uses in_file name as basename if none given
number_classes = traits.Range(low=1, high=10, argstr='-n %d',
desc='number of tissue-type classes')
output_biasfield = traits.Bool(desc='output estimated bias field',
argstr='-b')
output_biascorrected = traits.Bool(
desc='output restored image (bias-corrected image)', argstr='-B')
img_type = traits.Enum(
(1, 2, 3),
desc='int specifying type of image: (1 = T1, 2 = T2, 3 = PD)',
argstr='-t %d')
bias_iters = traits.Range(low=1, high=10, argstr='-I %d',
desc='number of main-loop iterations during '
'bias-field removal')
bias_lowpass = traits.Range(low=4, high=40,
desc='bias field smoothing extent (FWHM) '
'in mm',
argstr='-l %d', units='mm')
init_seg_smooth = traits.Range(low=0.0001, high=0.1,
desc='initial segmentation spatial '
'smoothness (during bias field '
'estimation)',
argstr='-f %.3f')
segments = traits.Bool(desc='outputs a separate binary image for each '
'tissue type',
argstr='-g')
init_transform = File(exists=True, desc='<standard2input.mat> initialise'
' using priors',
argstr='-a %s')
other_priors = InputMultiPath(
File(exist=True), desc='alternative prior images',
argstr='-A %s', minlen=3, maxlen=3)
no_pve = traits.Bool(desc='turn off PVE (partial volume estimation)',
argstr='--nopve')
no_bias = traits.Bool(desc='do not remove bias field',
argstr='-N')
use_priors = traits.Bool(desc='use priors throughout',
argstr='-P')
# ^^ Must also set -a! Not quite mutually inclusive; -a (init_transform) becomes conditionally mandatory, which still needs to be worked out with traits.
segment_iters = traits.Range(low=1, high=50,
desc='number of segmentation-initialisation'
' iterations',
argstr='-W %d')
mixel_smooth = traits.Range(low=0.0, high=1.0,
desc='spatial smoothness for mixeltype',
argstr='-R %.2f')
iters_afterbias = traits.Range(low=1, high=20,
desc='number of main-loop iterations '
'after bias-field removal',
argstr='-O %d')
hyper = traits.Range(low=0.0, high=1.0,
desc='segmentation spatial smoothness',
argstr='-H %.2f')
verbose = traits.Bool(desc='switch on diagnostic messages',
argstr='-v')
manual_seg = File(exists=True, desc='Filename containing intensities',
argstr='-s %s')
probability_maps = traits.Bool(desc='outputs individual probability maps',
argstr='-p')
class FASTOutputSpec(TraitedSpec):
"""Specify possible outputs from FAST"""
tissue_class_map = File(exists=True,
desc='path/name of binary segmented volume file'
' one val for each class _seg')
tissue_class_files = OutputMultiPath(File(
desc=('path/name of binary segmented volumes one file for each class '
'_seg_x')))
restored_image = OutputMultiPath(File(
desc=('restored images (one for each input image) named according to '
'the input images _restore')))
mixeltype = File(desc="path/name of mixeltype volume file _mixeltype")
partial_volume_map = File(desc="path/name of partial volume file _pveseg")
partial_volume_files = OutputMultiPath(File(
desc='path/name of partial volumes files one for each class, _pve_x'))
bias_field = OutputMultiPath(File(desc='Estimated bias field _bias'))
probability_maps = OutputMultiPath(File(
desc='filenames, one for each class, for each input, prob_x'))
class FAST(FSLCommand):
""" Use FSL FAST for segmenting and bias correction.
For complete details, see the `FAST Documentation.
<http://www.fmrib.ox.ac.uk/fsl/fast4/index.html>`_
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
Assign options through the ``inputs`` attribute:
>>> fastr = fsl.FAST()
>>> fastr.inputs.in_files = example_data('structural.nii')
>>> out = fastr.run() #doctest: +SKIP
"""
_cmd = 'fast'
input_spec = FASTInputSpec
output_spec = FASTOutputSpec
def _format_arg(self, name, spec, value):
# first do what should be done in general
formated = super(FAST, self)._format_arg(name, spec, value)
if name == 'in_files':
# FAST needs the -S parameter value to correspond to the number
# of input images, otherwise it will ignore all but the first
formated = "-S %d %s" % (len(value), formated)
return formated
def _list_outputs(self):
outputs = self.output_spec().get()
if not isdefined(self.inputs.number_classes):
nclasses = 3
else:
nclasses = self.inputs.number_classes
# when using multichannel, results basename is based on last
# input filename
_gen_fname_opts = {}
if isdefined(self.inputs.out_basename):
_gen_fname_opts['basename'] = self.inputs.out_basename
_gen_fname_opts['cwd'] = os.getcwd()
else:
_gen_fname_opts['basename'] = self.inputs.in_files[-1]
_gen_fname_opts['cwd'], _, _ = split_filename(_gen_fname_opts['basename'])
outputs['tissue_class_map'] = self._gen_fname(suffix='_seg', **_gen_fname_opts)
if self.inputs.segments:
outputs['tissue_class_files'] = []
for i in range(nclasses):
outputs['tissue_class_files'].append(
self._gen_fname(suffix='_seg_%d' % i, **_gen_fname_opts))
if isdefined(self.inputs.output_biascorrected):
outputs['restored_image'] = []
if len(self.inputs.in_files) > 1:
# for multi-image segmentation there is one corrected image
# per input
for val, f in enumerate(self.inputs.in_files):
# image numbering is 1-based
outputs['restored_image'].append(
self._gen_fname(suffix='_restore_%d' % (val + 1), **_gen_fname_opts))
else:
# single image segmentation has unnumbered output image
outputs['restored_image'].append(
self._gen_fname(suffix='_restore', **_gen_fname_opts))
outputs['mixeltype'] = self._gen_fname(suffix='_mixeltype', **_gen_fname_opts)
if not self.inputs.no_pve:
outputs['partial_volume_map'] = self._gen_fname(
suffix='_pveseg', **_gen_fname_opts)
outputs['partial_volume_files'] = []
for i in range(nclasses):
outputs[
'partial_volume_files'].append(
self._gen_fname(suffix='_pve_%d' % i, **_gen_fname_opts))
if self.inputs.output_biasfield:
outputs['bias_field'] = []
if len(self.inputs.in_files) > 1:
# for multi-image segmentation there is one bias field image
# per input
for val, f in enumerate(self.inputs.in_files):
# image numbering is 1-based
outputs['bias_field'].append(
self._gen_fname(suffix='_bias_%d' % (val + 1), **_gen_fname_opts))
else:
# single image segmentation has unnumbered output image
outputs['bias_field'].append(
self._gen_fname(suffix='_bias', **_gen_fname_opts))
if self.inputs.probability_maps:
outputs['probability_maps'] = []
for i in range(nclasses):
outputs['probability_maps'].append(
self._gen_fname(suffix='_prob_%d' % i, **_gen_fname_opts))
return outputs
class FLIRTInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='-in %s', mandatory=True,
position=0, desc='input file')
reference = File(exists=True, argstr='-ref %s', mandatory=True,
position=1, desc='reference file')
out_file = File(argstr='-out %s', desc='registered output file',
name_source=['in_file'], name_template='%s_flirt',
position=2, hash_files=False)
out_matrix_file = File(argstr='-omat %s',
name_source=['in_file'], keep_extension=True,
name_template='%s_flirt.mat',
desc='output affine matrix in 4x4 ascii format',
position=3, hash_files=False)
out_log = File(name_source=['in_file'], keep_extension=True,
requires=['save_log'],
name_template='%s_flirt.log', desc='output log')
in_matrix_file = File(argstr='-init %s', desc='input 4x4 affine matrix')
apply_xfm = traits.Bool(
argstr='-applyxfm', requires=['in_matrix_file'],
desc='apply transformation supplied by in_matrix_file')
apply_isoxfm = traits.Float(
argstr='-applyisoxfm %f', xor=['apply_xfm'],
desc='as applyxfm but forces isotropic resampling')
datatype = traits.Enum('char', 'short', 'int', 'float', 'double',
argstr='-datatype %s',
desc='force output data type')
cost = traits.Enum('mutualinfo', 'corratio', 'normcorr', 'normmi',
'leastsq', 'labeldiff', 'bbr',
argstr='-cost %s',
desc='cost function')
# XXX What is the difference between 'cost' and 'searchcost'? Are
# these both necessary or do they map to the same variable.
cost_func = traits.Enum('mutualinfo', 'corratio', 'normcorr', 'normmi',
'leastsq', 'labeldiff', 'bbr',
argstr='-searchcost %s',
desc='cost function')
uses_qform = traits.Bool(argstr='-usesqform',
desc='initialize using sform or qform')
display_init = traits.Bool(argstr='-displayinit',
desc='display initial matrix')
angle_rep = traits.Enum('quaternion', 'euler',
argstr='-anglerep %s',
desc='representation of rotation angles')
interp = traits.Enum('trilinear', 'nearestneighbour', 'sinc', 'spline',
argstr='-interp %s',
desc='final interpolation method used in reslicing')
sinc_width = traits.Int(argstr='-sincwidth %d', units='voxels',
desc='full-width in voxels')
sinc_window = traits.Enum('rectangular', 'hanning', 'blackman',
argstr='-sincwindow %s',
desc='sinc window') # XXX better doc
bins = traits.Int(argstr='-bins %d', desc='number of histogram bins')
dof = traits.Int(argstr='-dof %d',
desc='number of transform degrees of freedom')
no_resample = traits.Bool(argstr='-noresample',
desc='do not change input sampling')
force_scaling = traits.Bool(argstr='-forcescaling',
desc='force rescaling even for low-res images')
min_sampling = traits.Float(
argstr='-minsampling %f', units='mm',
desc='set minimum voxel dimension for sampling')
padding_size = traits.Int(argstr='-paddingsize %d', units='voxels',
desc='for applyxfm: interpolates outside image '
'by size')
searchr_x = traits.List(traits.Int, minlen=2, maxlen=2, units='degrees',
argstr='-searchrx %s',
desc='search angles along x-axis, in degrees')
searchr_y = traits.List(traits.Int, minlen=2, maxlen=2, units='degrees',
argstr='-searchry %s',
desc='search angles along y-axis, in degrees')
searchr_z = traits.List(traits.Int, minlen=2, maxlen=2, units='degrees',
argstr='-searchrz %s',
desc='search angles along z-axis, in degrees')
no_search = traits.Bool(argstr='-nosearch',
desc='set all angular searches to ranges 0 to 0')
coarse_search = traits.Int(argstr='-coarsesearch %d', units='degrees',
desc='coarse search delta angle')
fine_search = traits.Int(argstr='-finesearch %d', units='degrees',
desc='fine search delta angle')
schedule = File(exists=True, argstr='-schedule %s',
desc='replaces default schedule')
ref_weight = File(exists=True, argstr='-refweight %s',
desc='File for reference weighting volume')
in_weight = File(exists=True, argstr='-inweight %s',
desc='File for input weighting volume')
no_clamp = traits.Bool(argstr='-noclamp',
desc='do not use intensity clamping')
no_resample_blur = traits.Bool(argstr='-noresampblur',
desc='do not use blurring on downsampling')
rigid2D = traits.Bool(argstr='-2D',
desc='use 2D rigid body mode - ignores dof')
save_log = traits.Bool(desc='save to log file')
verbose = traits.Int(argstr='-verbose %d',
desc='verbose mode, 0 is least')
bgvalue = traits.Float(0, argstr='-setbackground %f',
desc=('use specified background value for points '
'outside FOV'))
# BBR options
wm_seg = File(
argstr='-wmseg %s', min_ver='5.0.0',
desc='white matter segmentation volume needed by BBR cost function')
wmcoords = File(
argstr='-wmcoords %s', min_ver='5.0.0',
desc='white matter boundary coordinates for BBR cost function')
wmnorms = File(
argstr='-wmnorms %s', min_ver='5.0.0',
desc='white matter boundary normals for BBR cost function')
fieldmap = File(
argstr='-fieldmap %s', min_ver='5.0.0',
desc=('fieldmap image in rads/s - must be already registered to the '
'reference image'))
fieldmapmask = File(
argstr='-fieldmapmask %s', min_ver='5.0.0',
desc='mask for fieldmap image')
pedir = traits.Int(
argstr='-pedir %d', min_ver='5.0.0',
desc='phase encode direction of EPI - 1/2/3=x/y/z & -1/-2/-3=-x/-y/-z')
echospacing = traits.Float(
argstr='-echospacing %f', min_ver='5.0.0',
desc='value of EPI echo spacing - units of seconds')
bbrtype = traits.Enum(
'signed', 'global_abs', 'local_abs',
argstr='-bbrtype %s', min_ver='5.0.0',
desc=('type of bbr cost function: signed [default], global_abs, '
'local_abs'))
bbrslope = traits.Float(
argstr='-bbrslope %f', min_ver='5.0.0',
desc='value of bbr slope')
class FLIRTOutputSpec(TraitedSpec):
out_file = File(exists=True,
desc='path/name of registered file (if generated)')
out_matrix_file = File(exists=True,
desc='path/name of calculated affine transform '
'(if generated)')
out_log = File(desc='path/name of output log (if generated)')
class FLIRT(FSLCommand):
"""Use FSL FLIRT for coregistration.
For complete details, see the `FLIRT Documentation.
<http://www.fmrib.ox.ac.uk/fsl/flirt/index.html>`_
To print out the command line help, use:
fsl.FLIRT().inputs_help()
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> flt = fsl.FLIRT(bins=640, cost_func='mutualinfo')
>>> flt.inputs.in_file = 'structural.nii'
>>> flt.inputs.reference = 'mni.nii'
>>> flt.inputs.output_type = "NIFTI_GZ"
>>> flt.cmdline # doctest: +ELLIPSIS +IGNORE_UNICODE
'flirt -in structural.nii -ref mni.nii -out structural_flirt.nii.gz -omat structural_flirt.mat -bins 640 -searchcost mutualinfo'
>>> res = flt.run() #doctest: +SKIP
"""
_cmd = 'flirt'
input_spec = FLIRTInputSpec
output_spec = FLIRTOutputSpec
def aggregate_outputs(self, runtime=None, needed_outputs=None):
outputs = super(FLIRT, self).aggregate_outputs(
runtime=runtime, needed_outputs=needed_outputs)
if isdefined(self.inputs.save_log) and self.inputs.save_log:
with open(outputs.out_log, "a") as text_file:
text_file.write(runtime.stdout + '\n')
return outputs
def _parse_inputs(self, skip=None):
skip = []
if isdefined(self.inputs.save_log) and self.inputs.save_log:
if not isdefined(self.inputs.verbose) or self.inputs.verbose == 0:
self.inputs.verbose = 1
skip.append('save_log')
return super(FLIRT, self)._parse_inputs(skip=skip)
class ApplyXFMInputSpec(FLIRTInputSpec):
apply_xfm = traits.Bool(
True, argstr='-applyxfm', requires=['in_matrix_file'],
desc='apply transformation supplied by in_matrix_file',
usedefault=True)
class ApplyXFM(FLIRT):
"""Currently just a light wrapper around FLIRT,
with no modifications
ApplyXFM is used to apply an existing transform to an image
Examples
--------
>>> import nipype.interfaces.fsl as fsl
>>> from nipype.testing import example_data
>>> applyxfm = fsl.preprocess.ApplyXFM()
>>> applyxfm.inputs.in_file = example_data('structural.nii')
>>> applyxfm.inputs.in_matrix_file = example_data('trans.mat')
>>> applyxfm.inputs.out_file = 'newfile.nii'
>>> applyxfm.inputs.reference = example_data('mni.nii')
>>> applyxfm.inputs.apply_xfm = True
>>> result = applyxfm.run() # doctest: +SKIP
"""
input_spec = ApplyXFMInputSpec
class ApplyXfm(ApplyXFM):
"""
.. deprecated:: 0.12.1
Use :py:class:`nipype.interfaces.fsl.ApplyXFM` instead
"""
def __init__(self, **inputs):
super(ApplyXfm, self).__init__(**inputs)
warn(('This interface has been renamed since 0.12.1, please use '
'nipype.interfaces.fsl.ApplyXFM'),
UserWarning)
class MCFLIRTInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, position=0, argstr="-in %s", mandatory=True,
desc="timeseries to motion-correct")
out_file = File(argstr='-out %s', genfile=True,
desc="file to write", hash_files=False)
cost = traits.Enum(
'mutualinfo', 'woods', 'corratio',
'normcorr', 'normmi', 'leastsquares',
argstr='-cost %s', desc="cost function to optimize")
bins = traits.Int(argstr='-bins %d', desc="number of histogram bins")
dof = traits.Int(
argstr='-dof %d', desc="degrees of freedom for the transformation")
ref_vol = traits.Int(argstr='-refvol %d', desc="volume to align frames to")
scaling = traits.Float(
argstr='-scaling %.2f', desc="scaling factor to use")
smooth = traits.Float(
argstr='-smooth %.2f', desc="smoothing factor for the cost function")
rotation = traits.Int(
argstr='-rotation %d', desc="scaling factor for rotation tolerances")
stages = traits.Int(
argstr='-stages %d',
desc="stages (if 4, perform final search with sinc interpolation")
init = File(exists=True, argstr='-init %s',
desc="inital transformation matrix")
interpolation = traits.Enum("spline", "nn", "sinc", argstr="-%s_final",
desc="interpolation method for transformation")
use_gradient = traits.Bool(
argstr='-gdt', desc="run search on gradient images")
use_contour = traits.Bool(
argstr='-edge', desc="run search on contour images")
mean_vol = traits.Bool(argstr='-meanvol', desc="register to mean volume")
stats_imgs = traits.Bool(
argstr='-stats', desc="produce variance and std. dev. images")
save_mats = traits.Bool(
argstr='-mats', desc="save transformation matrices")
save_plots = traits.Bool(
argstr='-plots', desc="save transformation parameters")
save_rms = traits.Bool(
argstr='-rmsabs -rmsrel', desc="save rms displacement parameters")
ref_file = File(exists=True, argstr='-reffile %s',
desc="target image for motion correction")
class MCFLIRTOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="motion-corrected timeseries")
variance_img = File(exists=True, desc="variance image")
std_img = File(exists=True, desc="standard deviation image")
mean_img = File(exists=True, desc="mean timeseries image")
par_file = File(exists=True, desc="text-file with motion parameters")
mat_file = OutputMultiPath(File(
exists=True), desc="transformation matrices")
rms_files = OutputMultiPath(File(
exists=True),
desc="absolute and relative displacement parameters")
class MCFLIRT(FSLCommand):
"""Use FSL MCFLIRT to do within-modality motion correction.
For complete details, see the `MCFLIRT Documentation.
<http://www.fmrib.ox.ac.uk/fsl/mcflirt/index.html>`_
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> mcflt = fsl.MCFLIRT(in_file=example_data('functional.nii'), cost='mutualinfo')
>>> res = mcflt.run() # doctest: +SKIP
"""
_cmd = 'mcflirt'
input_spec = MCFLIRTInputSpec
output_spec = MCFLIRTOutputSpec
def _format_arg(self, name, spec, value):
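        # The interpolation argstr is '-%s_final', so e.g. 'spline' becomes
        # '-spline_final'. 'trilinear' corresponds to mcflirt's default
        # behaviour, so no flag is emitted for it (an empty string is
        # returned below).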
if name == "interpolation":
if value == "trilinear":
return ""
else:
return spec.argstr % value
return super(MCFLIRT, self)._format_arg(name, spec, value)
def _list_outputs(self):
cwd = os.getcwd()
outputs = self._outputs().get()
outputs['out_file'] = self._gen_outfilename()
if isdefined(self.inputs.stats_imgs) and self.inputs.stats_imgs:
outputs['variance_img'] = self._gen_fname(outputs['out_file'] +
'_variance.ext', cwd=cwd)
outputs['std_img'] = self._gen_fname(outputs['out_file'] +
'_sigma.ext', cwd=cwd)
# The mean image created if -stats option is specified ('meanvol')
# is missing the top and bottom slices. Therefore we only expose the
# mean image created by -meanvol option ('mean_reg') which isn't
# corrupted.
# Note that the same problem holds for the std and variance image.
if isdefined(self.inputs.mean_vol) and self.inputs.mean_vol:
outputs['mean_img'] = self._gen_fname(outputs['out_file'] +
'_mean_reg.ext', cwd=cwd)
if isdefined(self.inputs.save_mats) and self.inputs.save_mats:
_, filename = os.path.split(outputs['out_file'])
matpathname = os.path.join(cwd, filename + '.mat')
_, _, _, timepoints = load(self.inputs.in_file).shape
outputs['mat_file'] = []
for t in range(timepoints):
outputs['mat_file'].append(os.path.join(matpathname,
'MAT_%04d' % t))
if isdefined(self.inputs.save_plots) and self.inputs.save_plots:
# Note - if e.g. out_file has .nii.gz, you get .nii.gz.par,
# which is what mcflirt does!
outputs['par_file'] = outputs['out_file'] + '.par'
if isdefined(self.inputs.save_rms) and self.inputs.save_rms:
outfile = outputs['out_file']
outputs['rms_files'] = [outfile + '_abs.rms', outfile + '_rel.rms']
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._gen_outfilename()
return None
def _gen_outfilename(self):
out_file = self.inputs.out_file
if isdefined(out_file):
out_file = os.path.realpath(out_file)
if not isdefined(out_file) and isdefined(self.inputs.in_file):
out_file = self._gen_fname(self.inputs.in_file,
suffix='_mcf')
return os.path.abspath(out_file)
class FNIRTInputSpec(FSLCommandInputSpec):
ref_file = File(exists=True, argstr='--ref=%s', mandatory=True,
desc='name of reference image')
in_file = File(exists=True, argstr='--in=%s', mandatory=True,
desc='name of input image')
affine_file = File(exists=True, argstr='--aff=%s',
desc='name of file containing affine transform')
inwarp_file = File(exists=True, argstr='--inwarp=%s',
desc='name of file containing initial non-linear warps')
in_intensitymap_file = File(exists=True, argstr='--intin=%s',
desc=('name of file/files containing initial '
'intensity mapping, usually generated by a '
'previous fnirt run'))
fieldcoeff_file = traits.Either(
traits.Bool, File, argstr='--cout=%s',
desc='name of output file with field coefficients or true')
warped_file = File(argstr='--iout=%s',
desc='name of output image', genfile=True,
hash_files=False)
field_file = traits.Either(traits.Bool, File,
argstr='--fout=%s',
desc='name of output file with field or true',
hash_files=False)
jacobian_file = traits.Either(traits.Bool, File,
argstr='--jout=%s',
desc=('name of file for writing out the '
'Jacobian of the field (for '
'diagnostic or VBM purposes)'),
hash_files=False)
modulatedref_file = traits.Either(traits.Bool, File,
argstr='--refout=%s',
desc=('name of file for writing out '
'intensity modulated --ref (for '
'diagnostic purposes)'),
hash_files=False)
out_intensitymap_file = traits.Either(traits.Bool, File,
argstr='--intout=%s',
desc=('name of files for writing '
'information pertaining to '
'intensity mapping'),
hash_files=False)
log_file = File(argstr='--logout=%s',
desc='Name of log-file', genfile=True, hash_files=False)
config_file = traits.Either(
traits.Enum("T1_2_MNI152_2mm", "FA_2_FMRIB58_1mm"), File(exists=True),
argstr='--config=%s',
desc='Name of config file specifying command line arguments')
refmask_file = File(exists=True, argstr='--refmask=%s',
desc='name of file with mask in reference space')
inmask_file = File(exists=True, argstr='--inmask=%s',
desc='name of file with mask in input image space')
skip_refmask = traits.Bool(
argstr='--applyrefmask=0', xor=['apply_refmask'],
desc='Skip specified refmask if set, default false')
skip_inmask = traits.Bool(
argstr='--applyinmask=0', xor=['apply_inmask'],
desc='skip specified inmask if set, default false')
apply_refmask = traits.List(
traits.Enum(0, 1), argstr='--applyrefmask=%s', xor=['skip_refmask'],
desc=('list of iterations to use reference mask on (1 to use, 0 to '
'skip)'),
sep=",")
apply_inmask = traits.List(
traits.Enum(0, 1), argstr='--applyinmask=%s', xor=['skip_inmask'],
desc='list of iterations to use input mask on (1 to use, 0 to skip)',
sep=",")
skip_implicit_ref_masking = traits.Bool(
argstr='--imprefm=0',
desc=('skip implicit masking based on value in --ref image. '
'Default = 0'))
skip_implicit_in_masking = traits.Bool(
argstr='--impinm=0',
desc=('skip implicit masking based on value in --in image. '
'Default = 0'))
refmask_val = traits.Float(
argstr='--imprefval=%f',
desc='Value to mask out in --ref image. Default =0.0')
inmask_val = traits.Float(
argstr='--impinval=%f',
desc='Value to mask out in --in image. Default =0.0')
max_nonlin_iter = traits.List(
traits.Int,
argstr='--miter=%s',
desc='Max # of non-linear iterations list, default [5, 5, 5, 5]',
sep=",")
subsampling_scheme = traits.List(
traits.Int,
argstr='--subsamp=%s',
desc='sub-sampling scheme, list, default [4, 2, 1, 1]',
sep=",")
warp_resolution = traits.Tuple(
traits.Int, traits.Int, traits.Int,
argstr='--warpres=%d,%d,%d',
desc=('(approximate) resolution (in mm) of warp basis in x-, y- and '
'z-direction, default 10, 10, 10'))
spline_order = traits.Int(
argstr='--splineorder=%d',
desc='Order of spline, 2->Quadratic spline, 3->Cubic spline. Default=3')
in_fwhm = traits.List(
traits.Int, argstr='--infwhm=%s',
desc=('FWHM (in mm) of gaussian smoothing kernel for input volume, '
'default [6, 4, 2, 2]'),
sep=",")
ref_fwhm = traits.List(
traits.Int, argstr='--reffwhm=%s',
desc=('FWHM (in mm) of gaussian smoothing kernel for ref volume, '
'default [4, 2, 0, 0]'),
sep=",")
regularization_model = traits.Enum(
'membrane_energy', 'bending_energy',
argstr='--regmod=%s',
desc=('Model for regularisation of warp-field [membrane_energy '
'bending_energy], default bending_energy'))
regularization_lambda = traits.List(
traits.Float, argstr='--lambda=%s',
desc=('Weight of regularisation, default depending on --ssqlambda and '
'--regmod switches. See user documentation.'),
sep=",")
skip_lambda_ssq = traits.Bool(
argstr='--ssqlambda=0',
desc='If true, lambda is not weighted by current ssq, default false')
jacobian_range = traits.Tuple(
traits.Float, traits.Float,
argstr='--jacrange=%f,%f',
desc='Allowed range of Jacobian determinants, default 0.01, 100.0')
derive_from_ref = traits.Bool(
argstr='--refderiv',
desc=('If true, ref image is used to calculate derivatives. '
'Default false'))
intensity_mapping_model = traits.Enum(
'none', 'global_linear', 'global_non_linear',
'local_linear', 'global_non_linear_with_bias',
'local_non_linear', argstr='--intmod=%s',
desc='Model for intensity-mapping')
intensity_mapping_order = traits.Int(
argstr='--intorder=%d',
desc='Order of polynomial for mapping intensities, default 5')
biasfield_resolution = traits.Tuple(
traits.Int, traits.Int, traits.Int,
argstr='--biasres=%d,%d,%d',
desc=('Resolution (in mm) of bias-field modelling local intensities, '
'default 50, 50, 50'))
bias_regularization_lambda = traits.Float(
argstr='--biaslambda=%f',
desc='Weight of regularisation for bias-field, default 10000')
skip_intensity_mapping = traits.Bool(
argstr='--estint=0', xor=['apply_intensity_mapping'],
desc='Skip estimate intensity-mapping default false')
apply_intensity_mapping = traits.List(
traits.Enum(0, 1), argstr='--estint=%s',
xor=['skip_intensity_mapping'],
desc=('List of subsampling levels to apply intensity mapping for '
'(0 to skip, 1 to apply)'),
sep=",")
hessian_precision = traits.Enum(
'double', 'float', argstr='--numprec=%s',
desc=('Precision for representing Hessian, double or float. '
'Default double'))
class FNIRTOutputSpec(TraitedSpec):
fieldcoeff_file = File(exists=True, desc='file with field coefficients')
warped_file = File(exists=True, desc='warped image')
field_file = File(desc='file with warp field')
jacobian_file = File(desc='file containing Jacobian of the field')
modulatedref_file = File(desc='file containing intensity modulated --ref')
out_intensitymap_file = File(
desc='file containing info pertaining to intensity mapping')
log_file = File(desc='Name of log-file')
class FNIRT(FSLCommand):
"""Use FSL FNIRT for non-linear registration.
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> fnt = fsl.FNIRT(affine_file=example_data('trans.mat'))
>>> res = fnt.run(ref_file=example_data('mni.nii'), in_file=example_data('structural.nii')) #doctest: +SKIP
T1 -> MNI152
>>> from nipype.interfaces import fsl
>>> fnirt_mprage = fsl.FNIRT()
>>> fnirt_mprage.inputs.in_fwhm = [8, 4, 2, 2]
>>> fnirt_mprage.inputs.subsampling_scheme = [4, 2, 1, 1]
Specify the resolution of the warps
>>> fnirt_mprage.inputs.warp_resolution = (6, 6, 6)
>>> res = fnirt_mprage.run(in_file='structural.nii', ref_file='mni.nii', warped_file='warped.nii', fieldcoeff_file='fieldcoeff.nii')#doctest: +SKIP
We can check the command line and confirm that it's what we expect.
>>> fnirt_mprage.cmdline #doctest: +SKIP
'fnirt --cout=fieldcoeff.nii --in=structural.nii --infwhm=8,4,2,2 --ref=mni.nii --subsamp=4,2,1,1 --warpres=6,6,6 --iout=warped.nii'
"""
_cmd = 'fnirt'
input_spec = FNIRTInputSpec
output_spec = FNIRTOutputSpec
filemap = {'warped_file': 'warped',
'field_file': 'field',
'jacobian_file': 'field_jacobian',
'modulatedref_file': 'modulated',
'out_intensitymap_file': 'intmap',
'log_file': 'log.txt',
'fieldcoeff_file': 'fieldwarp'}
def _list_outputs(self):
outputs = self.output_spec().get()
for key, suffix in list(self.filemap.items()):
inval = getattr(self.inputs, key)
change_ext = True
if key in ['warped_file', 'log_file']:
if suffix.endswith('.txt'):
change_ext = False
if isdefined(inval):
outputs[key] = inval
else:
outputs[key] = self._gen_fname(self.inputs.in_file,
suffix='_' + suffix,
change_ext=change_ext)
elif isdefined(inval):
if isinstance(inval, bool):
if inval:
outputs[key] = self._gen_fname(self.inputs.in_file,
suffix='_' + suffix,
change_ext=change_ext)
else:
outputs[key] = os.path.abspath(inval)
return outputs
def _format_arg(self, name, spec, value):
if name in list(self.filemap.keys()):
return spec.argstr % self._list_outputs()[name]
return super(FNIRT, self)._format_arg(name, spec, value)
def _gen_filename(self, name):
if name in ['warped_file', 'log_file']:
return self._list_outputs()[name]
return None
def write_config(self, configfile):
"""Writes out currently set options to specified config file
XX TODO : need to figure out how the config file is written
Parameters
----------
configfile : /path/to/configfile
"""
try:
fid = open(configfile, 'w+')
except IOError:
print ('unable to create config_file %s' % (configfile))
for item in list(self.inputs.get().items()):
fid.write('%s\n' % (item))
fid.close()
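# Sketch of a typical FNIRT -> ApplyWarp hand-off (file names are
# hypothetical): the coefficient volume written via fieldcoeff_file is what
# ApplyWarp, defined below, takes as its field_file input.
#
#   fnt = FNIRT(in_file='structural.nii', ref_file='mni.nii',
#               fieldcoeff_file='struct2mni_coeff.nii.gz')
#   aw = ApplyWarp(in_file='other.nii', ref_file='mni.nii',
#                  field_file='struct2mni_coeff.nii.gz')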
class ApplyWarpInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='--in=%s',
mandatory=True, position=0,
desc='image to be warped')
out_file = File(argstr='--out=%s', genfile=True, position=2,
desc='output filename', hash_files=False)
ref_file = File(exists=True, argstr='--ref=%s',
mandatory=True, position=1,
desc='reference image')
field_file = File(exists=True, argstr='--warp=%s',
desc='file containing warp field')
abswarp = traits.Bool(argstr='--abs', xor=['relwarp'],
desc="treat warp field as absolute: x' = w(x)")
relwarp = traits.Bool(argstr='--rel', xor=['abswarp'], position=-1,
desc="treat warp field as relative: x' = x + w(x)")
datatype = traits.Enum(
'char', 'short', 'int', 'float', 'double',
argstr='--datatype=%s',
desc='Force output data type [char short int float double].')
supersample = traits.Bool(
argstr='--super',
desc='intermediary supersampling of output, default is off')
superlevel = traits.Either(
traits.Enum('a'), traits.Int,
argstr='--superlevel=%s',
desc=("level of intermediary supersampling, a for 'automatic' or "
"integer level. Default = 2"))
premat = File(exists=True, argstr='--premat=%s',
desc='filename for pre-transform (affine matrix)')
postmat = File(exists=True, argstr='--postmat=%s',
desc='filename for post-transform (affine matrix)')
mask_file = File(exists=True, argstr='--mask=%s',
desc='filename for mask image (in reference space)')
interp = traits.Enum(
'nn', 'trilinear', 'sinc', 'spline', argstr='--interp=%s', position=-2,
desc='interpolation method')
class ApplyWarpOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='Warped output file')
class ApplyWarp(FSLCommand):
"""Use FSL's applywarp to apply the results of a FNIRT registration
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> aw = fsl.ApplyWarp()
>>> aw.inputs.in_file = example_data('structural.nii')
>>> aw.inputs.ref_file = example_data('mni.nii')
>>> aw.inputs.field_file = 'my_coefficients_filed.nii' #doctest: +SKIP
>>> res = aw.run() #doctest: +SKIP
"""
_cmd = 'applywarp'
input_spec = ApplyWarpInputSpec
output_spec = ApplyWarpOutputSpec
def _format_arg(self, name, spec, value):
if name == 'superlevel':
return spec.argstr % str(value)
return super(ApplyWarp, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self._outputs().get()
if not isdefined(self.inputs.out_file):
outputs['out_file'] = self._gen_fname(self.inputs.in_file,
suffix='_warp')
else:
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()[name]
return None
class SliceTimerInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='--in=%s',
mandatory=True, position=0,
desc='filename of input timeseries')
out_file = File(argstr='--out=%s', genfile=True,
desc='filename of output timeseries', hash_files=False)
index_dir = traits.Bool(argstr='--down',
desc='slice indexing from top to bottom')
time_repetition = traits.Float(argstr='--repeat=%f',
desc='Specify TR of data - default is 3s')
slice_direction = traits.Enum(
1, 2, 3, argstr='--direction=%d',
desc='direction of slice acquisition (x=1, y=2, z=3) - default is z')
interleaved = traits.Bool(argstr='--odd',
desc='use interleaved acquisition')
custom_timings = File(
exists=True, argstr='--tcustom=%s',
desc=('slice timings, in fractions of TR, range 0:1 (default is 0.5 = '
'no shift)'))
global_shift = traits.Float(
argstr='--tglobal',
desc='shift in fraction of TR, range 0:1 (default is 0.5 = no shift)')
custom_order = File(
exists=True, argstr='--ocustom=%s',
desc=('filename of single-column custom interleave order file (first '
'slice is referred to as 1 not 0)'))
class SliceTimerOutputSpec(TraitedSpec):
slice_time_corrected_file = File(
exists=True, desc='slice time corrected file')
class SliceTimer(FSLCommand):
""" use FSL slicetimer to perform slice timing correction.
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> st = fsl.SliceTimer()
>>> st.inputs.in_file = example_data('functional.nii')
>>> st.inputs.interleaved = True
>>> result = st.run() #doctest: +SKIP
"""
_cmd = 'slicetimer'
input_spec = SliceTimerInputSpec
output_spec = SliceTimerOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
out_file = self.inputs.out_file
if not isdefined(out_file):
out_file = self._gen_fname(self.inputs.in_file,
suffix='_st')
outputs['slice_time_corrected_file'] = os.path.abspath(out_file)
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()['slice_time_corrected_file']
return None
class SUSANInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='%s',
mandatory=True, position=1,
desc='filename of input timeseries')
brightness_threshold = traits.Float(
argstr='%.10f',
position=2, mandatory=True,
desc=('brightness threshold; should be greater than the noise level '
'and less than the contrast of edges to be preserved.'))
fwhm = traits.Float(
argstr='%.10f', position=3, mandatory=True,
desc='fwhm of smoothing, in mm, gets converted using sqrt(8*log(2))')
dimension = traits.Enum(3, 2, argstr='%d', position=4, usedefault=True,
desc='within-plane (2) or fully 3D (3)')
use_median = traits.Enum(
1, 0, argstr='%d', position=5, usedefault=True,
desc=('whether to use a local median filter in the cases where '
'single-point noise is detected'))
usans = traits.List(
traits.Tuple(File(exists=True), traits.Float), maxlen=2,
argstr='', position=6, default=[], usedefault=True,
desc='determines whether the smoothing area (USAN) is to be '
'found from secondary images (0, 1 or 2). A negative '
'value for any brightness threshold will auto-set the '
'threshold at 10% of the robust range')
out_file = File(argstr='%s', position=-1, genfile=True,
desc='output file name', hash_files=False)
class SUSANOutputSpec(TraitedSpec):
smoothed_file = File(exists=True, desc='smoothed output file')
class SUSAN(FSLCommand):
""" use FSL SUSAN to perform smoothing
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> anatfile # doctest: +SKIP
anatomical.nii # doctest: +SKIP
>>> sus = fsl.SUSAN()
>>> sus.inputs.in_file = example_data('structural.nii')
>>> sus.inputs.brightness_threshold = 2000.0
>>> sus.inputs.fwhm = 8.0
>>> result = sus.run() # doctest: +SKIP
"""
_cmd = 'susan'
input_spec = SUSANInputSpec
output_spec = SUSANOutputSpec
def _format_arg(self, name, spec, value):
if name == 'fwhm':
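            # susan's smoothing argument is a gaussian sigma in mm rather than
            # a FWHM, so convert: sigma = fwhm / sqrt(8 * ln 2) ~= fwhm / 2.3548.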
return spec.argstr % (float(value) / np.sqrt(8 * np.log(2)))
if name == 'usans':
if not value:
return '0'
arglist = [str(len(value))]
for filename, thresh in value:
arglist.extend([filename, '%.10f' % thresh])
return ' '.join(arglist)
return super(SUSAN, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self._outputs().get()
out_file = self.inputs.out_file
if not isdefined(out_file):
out_file = self._gen_fname(self.inputs.in_file,
suffix='_smooth')
outputs['smoothed_file'] = os.path.abspath(out_file)
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()['smoothed_file']
return None
class FUGUEInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='--in=%s',
desc='filename of input volume')
shift_in_file = File(exists=True, argstr='--loadshift=%s',
desc='filename for reading pixel shift volume')
phasemap_in_file = File(exists=True, argstr='--phasemap=%s',
desc='filename for input phase image')
fmap_in_file = File(exists=True, argstr='--loadfmap=%s',
desc='filename for loading fieldmap (rad/s)')
unwarped_file = File(argstr='--unwarp=%s',
desc='apply unwarping and save as filename',
xor=['warped_file'], requires=['in_file'])
warped_file = File(argstr='--warp=%s',
desc='apply forward warping and save as filename',
xor=['unwarped_file'], requires=['in_file'])
forward_warping = traits.Bool(
False, usedefault=True,
desc='apply forward warping instead of unwarping')
dwell_to_asym_ratio = traits.Float(argstr='--dwelltoasym=%.10f',
desc='set the dwell to asym time ratio')
dwell_time = traits.Float(
argstr='--dwell=%.10f',
desc=('set the EPI dwell time per phase-encode line - same as echo '
'spacing - (sec)'))
asym_se_time = traits.Float(
argstr='--asym=%.10f',
desc='set the fieldmap asymmetric spin echo time (sec)')
median_2dfilter = traits.Bool(argstr='--median',
desc='apply 2D median filtering')
despike_2dfilter = traits.Bool(argstr='--despike',
desc='apply a 2D de-spiking filter')
no_gap_fill = traits.Bool(
argstr='--nofill',
desc='do not apply gap-filling measure to the fieldmap')
no_extend = traits.Bool(
argstr='--noextend',
desc='do not apply rigid-body extrapolation to the fieldmap')
smooth2d = traits.Float(
argstr='--smooth2=%.2f',
desc='apply 2D Gaussian smoothing of sigma N (in mm)')
smooth3d = traits.Float(
argstr='--smooth3=%.2f',
desc='apply 3D Gaussian smoothing of sigma N (in mm)')
poly_order = traits.Int(argstr='--poly=%d',
desc='apply polynomial fitting of order N')
fourier_order = traits.Int(
argstr='--fourier=%d',
desc='apply Fourier (sinusoidal) fitting of order N')
pava = traits.Bool(argstr='--pava',
desc='apply monotonic enforcement via PAVA')
despike_threshold = traits.Float(
argstr='--despikethreshold=%s',
desc='specify the threshold for de-spiking (default=3.0)')
unwarp_direction = traits.Enum(
'x', 'y', 'z', 'x-', 'y-', 'z-',
argstr='--unwarpdir=%s',
desc='specifies direction of warping (default y)')
phase_conjugate = traits.Bool(
argstr='--phaseconj',
desc='apply phase conjugate method of unwarping')
icorr = traits.Bool(
argstr='--icorr', requires=['shift_in_file'],
desc=('apply intensity correction to unwarping (pixel shift method '
'only)'))
icorr_only = traits.Bool(argstr='--icorronly', requires=['unwarped_file'],
desc='apply intensity correction only')
mask_file = File(exists=True, argstr='--mask=%s',
desc='filename for loading valid mask')
nokspace = traits.Bool(False, argstr='--nokspace',
desc='do not use k-space forward warping')
# Special outputs: shift (voxel shift map, vsm)
save_shift = traits.Bool(False, xor=['save_unmasked_shift'],
desc='write pixel shift volume')
shift_out_file = File(argstr='--saveshift=%s',
desc='filename for saving pixel shift volume')
save_unmasked_shift = traits.Bool(
argstr='--unmaskshift', xor=['save_shift'],
desc='saves the unmasked shiftmap when using --saveshift')
# Special outputs: fieldmap (fmap)
save_fmap = traits.Bool(False, xor=['save_unmasked_fmap'],
desc='write field map volume')
fmap_out_file = File(argstr='--savefmap=%s',
desc='filename for saving fieldmap (rad/s)')
save_unmasked_fmap = traits.Bool(
False, argstr='--unmaskfmap', xor=['save_fmap'],
desc='saves the unmasked fieldmap when using --savefmap')
class FUGUEOutputSpec(TraitedSpec):
unwarped_file = File(desc='unwarped file')
warped_file = File(desc='forward warped file')
shift_out_file = File(desc='voxel shift map file')
fmap_out_file = File(desc='fieldmap file')
class FUGUE(FSLCommand):
"""
`FUGUE <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FUGUE>`_ is, most generally,
a set of tools for EPI distortion correction.
Distortions may be corrected for
1. improving registration with non-distorted images (e.g. structurals),
or
2. dealing with motion-dependent changes.
FUGUE is designed to deal only with the first case -
improving registration.
Examples
--------
Unwarping an input image (shift map is known)
>>> from nipype.interfaces.fsl.preprocess import FUGUE
>>> fugue = FUGUE()
>>> fugue.inputs.in_file = 'epi.nii'
>>> fugue.inputs.mask_file = 'epi_mask.nii'
>>> fugue.inputs.shift_in_file = 'vsm.nii' # Previously computed with fugue as well
>>> fugue.inputs.unwarp_direction = 'y'
>>> fugue.inputs.output_type = "NIFTI_GZ"
>>> fugue.cmdline # doctest: +ELLIPSIS +IGNORE_UNICODE
'fugue --in=epi.nii --mask=epi_mask.nii --loadshift=vsm.nii --unwarpdir=y --unwarp=epi_unwarped.nii.gz'
>>> fugue.run() #doctest: +SKIP
Warping an input image (shift map is known)
>>> from nipype.interfaces.fsl.preprocess import FUGUE
>>> fugue = FUGUE()
>>> fugue.inputs.in_file = 'epi.nii'
>>> fugue.inputs.forward_warping = True
>>> fugue.inputs.mask_file = 'epi_mask.nii'
>>> fugue.inputs.shift_in_file = 'vsm.nii' # Previously computed with fugue as well
>>> fugue.inputs.unwarp_direction = 'y'
>>> fugue.inputs.output_type = "NIFTI_GZ"
>>> fugue.cmdline # doctest: +ELLIPSIS +IGNORE_UNICODE
'fugue --in=epi.nii --mask=epi_mask.nii --loadshift=vsm.nii --unwarpdir=y --warp=epi_warped.nii.gz'
>>> fugue.run() #doctest: +SKIP
Computing the vsm (unwrapped phase map is known)
>>> from nipype.interfaces.fsl.preprocess import FUGUE
>>> fugue = FUGUE()
>>> fugue.inputs.phasemap_in_file = 'epi_phasediff.nii'
>>> fugue.inputs.mask_file = 'epi_mask.nii'
>>> fugue.inputs.dwell_to_asym_ratio = (0.77e-3 * 3) / 2.46e-3
>>> fugue.inputs.unwarp_direction = 'y'
>>> fugue.inputs.save_shift = True
>>> fugue.inputs.output_type = "NIFTI_GZ"
>>> fugue.cmdline # doctest: +ELLIPSIS +IGNORE_UNICODE
'fugue --dwelltoasym=0.9390243902 --mask=epi_mask.nii --phasemap=epi_phasediff.nii --saveshift=epi_phasediff_vsm.nii.gz --unwarpdir=y'
>>> fugue.run() #doctest: +SKIP
"""
_cmd = 'fugue'
input_spec = FUGUEInputSpec
output_spec = FUGUEOutputSpec
def _parse_inputs(self, skip=None):
if skip is None:
skip = []
input_phase = isdefined(self.inputs.phasemap_in_file)
input_vsm = isdefined(self.inputs.shift_in_file)
input_fmap = isdefined(self.inputs.fmap_in_file)
if not input_phase and not input_vsm and not input_fmap:
raise RuntimeError(
('Either phasemap_in_file, shift_in_file or fmap_in_file must '
'be set.'))
if not isdefined(self.inputs.in_file):
skip += ['unwarped_file', 'warped_file']
else:
if self.inputs.forward_warping:
skip += ['unwarped_file']
trait_spec = self.inputs.trait('warped_file')
trait_spec.name_template = "%s_warped"
trait_spec.name_source = 'in_file'
trait_spec.output_name = 'warped_file'
else:
skip += ['warped_file']
trait_spec = self.inputs.trait('unwarped_file')
trait_spec.name_template = "%s_unwarped"
trait_spec.name_source = 'in_file'
trait_spec.output_name = 'unwarped_file'
# Handle shift output
if not isdefined(self.inputs.shift_out_file):
vsm_save_masked = (isdefined(self.inputs.save_shift) and
self.inputs.save_shift)
vsm_save_unmasked = (isdefined(self.inputs.save_unmasked_shift) and
self.inputs.save_unmasked_shift)
if (vsm_save_masked or vsm_save_unmasked):
trait_spec = self.inputs.trait('shift_out_file')
trait_spec.output_name = 'shift_out_file'
if input_fmap:
trait_spec.name_source = 'fmap_in_file'
elif input_phase:
trait_spec.name_source = 'phasemap_in_file'
elif input_vsm:
trait_spec.name_source = 'shift_in_file'
else:
raise RuntimeError(
('Either phasemap_in_file, shift_in_file or '
'fmap_in_file must be set.'))
if vsm_save_unmasked:
trait_spec.name_template = '%s_vsm_unmasked'
else:
trait_spec.name_template = '%s_vsm'
else:
skip += ['save_shift', 'save_unmasked_shift', 'shift_out_file']
# Handle fieldmap output
if not isdefined(self.inputs.fmap_out_file):
fmap_save_masked = (isdefined(self.inputs.save_fmap) and
self.inputs.save_fmap)
fmap_save_unmasked = (isdefined(self.inputs.save_unmasked_fmap) and
self.inputs.save_unmasked_fmap)
if (fmap_save_masked or fmap_save_unmasked):
trait_spec = self.inputs.trait('fmap_out_file')
trait_spec.output_name = 'fmap_out_file'
if input_vsm:
trait_spec.name_source = 'shift_in_file'
elif input_phase:
trait_spec.name_source = 'phasemap_in_file'
elif input_fmap:
trait_spec.name_source = 'fmap_in_file'
else:
raise RuntimeError(
('Either phasemap_in_file, shift_in_file or '
'fmap_in_file must be set.'))
if fmap_save_unmasked:
trait_spec.name_template = '%s_fieldmap_unmasked'
else:
trait_spec.name_template = '%s_fieldmap'
else:
skip += ['save_fmap', 'save_unmasked_fmap', 'fmap_out_file']
return super(FUGUE, self)._parse_inputs(skip=skip)
class PRELUDEInputSpec(FSLCommandInputSpec):
complex_phase_file = File(exists=True, argstr='--complex=%s',
mandatory=True, xor=[
'magnitude_file', 'phase_file'],
desc='complex phase input volume')
magnitude_file = File(exists=True, argstr='--abs=%s',
mandatory=True,
xor=['complex_phase_file'],
desc='file containing magnitude image')
phase_file = File(exists=True, argstr='--phase=%s',
mandatory=True,
xor=['complex_phase_file'],
desc='raw phase file')
unwrapped_phase_file = File(genfile=True,
argstr='--unwrap=%s',
                                desc='file containing unwrapped phase',
hash_files=False)
num_partitions = traits.Int(argstr='--numphasesplit=%d',
desc='number of phase partitions to use')
labelprocess2d = traits.Bool(
argstr='--labelslices',
desc='does label processing in 2D (slice at a time)')
process2d = traits.Bool(argstr='--slices',
xor=['labelprocess2d'],
desc='does all processing in 2D (slice at a time)')
process3d = traits.Bool(argstr='--force3D',
xor=['labelprocess2d', 'process2d'],
desc='forces all processing to be full 3D')
threshold = traits.Float(argstr='--thresh=%.10f',
desc='intensity threshold for masking')
mask_file = File(exists=True, argstr='--mask=%s',
desc='filename of mask input volume')
start = traits.Int(argstr='--start=%d',
desc='first image number to process (default 0)')
end = traits.Int(argstr='--end=%d',
desc='final image number to process (default Inf)')
savemask_file = File(argstr='--savemask=%s',
desc='saving the mask volume', hash_files=False)
rawphase_file = File(argstr='--rawphase=%s',
desc='saving the raw phase output', hash_files=False)
label_file = File(argstr='--labels=%s',
desc='saving the area labels output', hash_files=False)
removeramps = traits.Bool(argstr='--removeramps',
desc='remove phase ramps during unwrapping')
class PRELUDEOutputSpec(TraitedSpec):
unwrapped_phase_file = File(exists=True,
desc='unwrapped phase file')
class PRELUDE(FSLCommand):
"""Use FSL prelude to do phase unwrapping
Examples
--------
Please insert examples for use of this command
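    A minimal usage sketch (added for illustration; the file names below are
    placeholders rather than shipped test data):
    >>> from nipype.interfaces.fsl.preprocess import PRELUDE
    >>> prelude = PRELUDE()
    >>> prelude.inputs.phase_file = 'phase.nii'  # doctest: +SKIP
    >>> prelude.inputs.magnitude_file = 'magnitude.nii'  # doctest: +SKIP
    >>> res = prelude.run()  # doctest: +SKIP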
"""
input_spec = PRELUDEInputSpec
output_spec = PRELUDEOutputSpec
_cmd = 'prelude'
def __init__(self, **kwargs):
super(PRELUDE, self).__init__(**kwargs)
warn('This has not been fully tested. Please report any failures.')
def _list_outputs(self):
outputs = self._outputs().get()
out_file = self.inputs.unwrapped_phase_file
if not isdefined(out_file):
if isdefined(self.inputs.phase_file):
out_file = self._gen_fname(self.inputs.phase_file,
suffix='_unwrapped')
elif isdefined(self.inputs.complex_phase_file):
out_file = self._gen_fname(self.inputs.complex_phase_file,
suffix='_phase_unwrapped')
outputs['unwrapped_phase_file'] = os.path.abspath(out_file)
return outputs
def _gen_filename(self, name):
if name == 'unwrapped_phase_file':
return self._list_outputs()['unwrapped_phase_file']
return None
class FIRSTInputSpec(FSLCommandInputSpec):
in_file = File(
exists=True, mandatory=True, position=-2, copyfile=False,
argstr='-i %s', desc='input data file')
out_file = File(
'segmented', usedefault=True, mandatory=True, position=-1,
argstr='-o %s', desc='output data file', hash_files=False)
verbose = traits.Bool(argstr='-v', position=1,
desc="Use verbose logging.")
brain_extracted = traits.Bool(
argstr='-b', position=2,
desc="Input structural image is already brain-extracted")
no_cleanup = traits.Bool(
argstr='-d', position=3,
desc="Input structural image is already brain-extracted")
method = traits.Enum(
'auto', 'fast', 'none', xor=['method_as_numerical_threshold'],
argstr='-m %s', position=4, usedefault=True,
desc=("Method must be one of auto, fast, none, or it can be entered "
"using the 'method_as_numerical_threshold' input"))
method_as_numerical_threshold = traits.Float(
argstr='-m %.4f', position=4,
desc=("Specify a numerical threshold value or use the 'method' input "
"to choose auto, fast, or none"))
list_of_specific_structures = traits.List(
traits.Str, argstr='-s %s', sep=',', position=5, minlen=1,
        desc='Runs only on the specified structures (e.g. L_Hipp, R_Hipp, '
             'L_Accu, R_Accu, L_Amyg, R_Amyg, '
             'L_Caud, R_Caud, L_Pall, R_Pall, '
             'L_Puta, R_Puta, L_Thal, R_Thal, BrStem)')
affine_file = File(
exists=True, position=6, argstr='-a %s',
desc=('Affine matrix to use (e.g. img2std.mat) (does not '
're-run registration)'))
class FIRSTOutputSpec(TraitedSpec):
vtk_surfaces = OutputMultiPath(
File(exists=True),
desc='VTK format meshes for each subcortical region')
bvars = OutputMultiPath(
File(exists=True),
desc='bvars for each subcortical region')
original_segmentations = File(
exists=True, desc=('3D image file containing the segmented regions '
'as integer values. Uses CMA labelling'))
segmentation_file = File(
exists=True, desc=('4D image file containing a single volume per '
'segmented region'))
class FIRST(FSLCommand):
"""
Use FSL's run_first_all command to segment subcortical volumes
http://www.fmrib.ox.ac.uk/fsl/first/index.html
Examples
--------
>>> from nipype.interfaces import fsl
>>> first = fsl.FIRST()
>>> first.inputs.in_file = 'structural.nii'
>>> first.inputs.out_file = 'segmented.nii'
>>> res = first.run() #doctest: +SKIP
"""
_cmd = 'run_first_all'
input_spec = FIRSTInputSpec
output_spec = FIRSTOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.list_of_specific_structures):
structures = self.inputs.list_of_specific_structures
else:
structures = ['L_Hipp', 'R_Hipp',
'L_Accu', 'R_Accu',
'L_Amyg', 'R_Amyg',
'L_Caud', 'R_Caud',
'L_Pall', 'R_Pall',
'L_Puta', 'R_Puta',
'L_Thal', 'R_Thal',
'BrStem']
outputs['original_segmentations'] = \
self._gen_fname('original_segmentations')
outputs['segmentation_file'] = self._gen_fname('segmentation_file')
outputs['vtk_surfaces'] = self._gen_mesh_names('vtk_surfaces',
structures)
outputs['bvars'] = self._gen_mesh_names('bvars', structures)
return outputs
def _gen_fname(self, name):
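        # run_first_all names its outputs after the '-o' prefix plus the
        # method used, e.g. (illustrative) out_file='segmented.nii' with the
        # default 'auto' method yields 'segmented_all_fast_origsegs.nii.gz'
        # and 'segmented_all_fast_firstseg.nii.gz' (see the logic below).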
path, outname, ext = split_filename(self.inputs.out_file)
method = 'none'
if isdefined(self.inputs.method) and self.inputs.method != 'none':
method = 'fast'
if (self.inputs.list_of_specific_structures and
self.inputs.method == 'auto'):
method = 'none'
if isdefined(self.inputs.method_as_numerical_threshold):
thres = '%.4f' % self.inputs.method_as_numerical_threshold
method = thres.replace('.', '')
if name == 'original_segmentations':
return op.abspath('%s_all_%s_origsegs.nii.gz' % (outname, method))
if name == 'segmentation_file':
return op.abspath('%s_all_%s_firstseg.nii.gz' % (outname, method))
return None
def _gen_mesh_names(self, name, structures):
path, prefix, ext = split_filename(self.inputs.out_file)
if name == 'vtk_surfaces':
vtks = list()
for struct in structures:
vtk = prefix + '-' + struct + '_first.vtk'
vtks.append(op.abspath(vtk))
return vtks
if name == 'bvars':
bvars = list()
for struct in structures:
bvar = prefix + '-' + struct + '_first.bvars'
bvars.append(op.abspath(bvar))
return bvars
return None
|
carolFrohlich/nipype
|
nipype/interfaces/fsl/preprocess.py
|
Python
|
bsd-3-clause
| 75,161
|
[
"Gaussian",
"VTK"
] |
ccde6d76ec02cac8bf506136b6ec64a2e930b22517538a0040ca51c7e596e0b4
|
# https://clifford.readthedocs.io/en/latest/SpaceTimeAlgebra.html
from . import Cl, BladeMap
# Dirac Algebra `D`
D, D_blades = Cl(1, 3, firstIdx=0, names='d')
# Pauli Algebra `P`
P, P_blades = Cl(3, names='p')
# put elements of each in namespace
locals().update(D_blades)
locals().update(P_blades)
bm = BladeMap([(d01, p1),
(d02, p2),
(d03, p3),
(d12, p12),
(d23, p23),
(d13, p13),
(d0123, p123)])
def split(X):
'''
implements the spacetime split.
'''
return bm(X.odd*d0+X.even)
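# Illustrative note (not part of the original module): split() realizes the
# Hestenes spacetime split relative to the d0 time axis -- the odd part of a
# multivector is multiplied by d0 and the result is carried into the Pauli
# algebra P via the BladeMap above, so a spacetime event becomes a scalar
# (time) plus a relative vector spanned by p1, p2, p3.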
|
arsenovic/clifford
|
clifford/sta.py
|
Python
|
bsd-3-clause
| 594
|
[
"DIRAC"
] |
db632d39783a7a159eddd548a7e86eec7306b27c482664a9584b559f1abe971e
|
from vtk import *
import os.path
data_dir = "../../../../VTKData/Data/Infovis/SQLite/"
if not os.path.exists(data_dir):
data_dir = "../../../../../VTKData/Data/Infovis/SQLite/"
if not os.path.exists(data_dir):
data_dir = "../../../../../../VTKData/Data/Infovis/SQLite/"
sqlite_file = data_dir + "temperatures.db"
# Pull the table from the database
databaseToTable = vtkSQLDatabaseTableSource()
databaseToTable.SetURL("sqlite://" + sqlite_file)
databaseToTable.SetQuery("select * from main_tbl")
# How many rows does the input table have?
databaseToTable.Update()
inputTable = databaseToTable.GetOutput()
numRows = inputTable.GetNumberOfRows()
print "Input Table Rows: " , numRows
# Divide the table into 4
# Note1: using data specific method
subSize = numRows/4
leftOver = numRows - subSize*4
print "subSize: ", subSize
print "leftOver: ", leftOver
# Python knows nothing of enumerated types
# 6 = vtkDataObject::FIELD_ASSOCIATION_ROWS
# Mode = ACCEPT_LESS_THAN = 0, ACCEPT_GREATER_THAN = 1, ACCEPT_BETWEEN = 2, ACCEPT_OUTSIDE = 3
threshold = vtkThresholdTable()
threshold.SetInputConnection(databaseToTable.GetOutputPort())
threshold.SetInputArrayToProcess(0,0,0,6, "id")
threshold.SetMode(2)
threshold.SetMinValue(vtkVariant(0))
threshold.SetMaxValue(vtkVariant(subSize-1))
threshold.Update()
subTable1 = vtkTable()
subTable1.DeepCopy(threshold.GetOutput())
subTable1.Dump(10)
threshold.SetMinValue(vtkVariant(subSize))
threshold.SetMaxValue(vtkVariant(subSize*2-1))
threshold.Update()
subTable2 = vtkTable()
subTable2.DeepCopy(threshold.GetOutput())
subTable2.Dump(10)
threshold.SetMinValue(vtkVariant(subSize*2))
threshold.SetMaxValue(vtkVariant(subSize*3-1))
threshold.Update()
subTable3 = vtkTable()
subTable3.DeepCopy(threshold.GetOutput())
threshold.SetMinValue(vtkVariant(subSize*3))
threshold.SetMaxValue(vtkVariant(subSize*4+leftOver-1))
threshold.Update()
subTable4 = vtkTable()
subTable4.DeepCopy(threshold.GetOutput())
print "SubTable1 Rows: " , subTable1.GetNumberOfRows()
print "SubTable2 Rows: " , subTable2.GetNumberOfRows()
print "SubTable3 Rows: " , subTable3.GetNumberOfRows()
print "SubTable4 Rows: " , subTable4.GetNumberOfRows()
# Calculate offline(non-streaming) descriptive statistics
print "# Calculate offline descriptive statistics:"
ds = vtkDescriptiveStatistics()
ds.SetInputConnection(databaseToTable.GetOutputPort())
ds.AddColumn("Temp1")
ds.AddColumn("Temp2")
ds.Update()
dStats = ds.GetOutputDataObject( 1 )
dPrimary = dStats.GetBlock( 0 )
dDerived = dStats.GetBlock( 1 )
dPrimary.Dump( 15 )
dDerived.Dump( 15 )
inter = vtkDescriptiveStatistics()
inter.AddColumn("Temp1")
inter.AddColumn("Temp2")
# Calculate online(streaming) descriptive statistics
print "# Calculate online descriptive statistics:"
ss = vtkStreamingStatistics()
ss.SetStatisticsAlgorithm(inter)
ss.SetInputData(subTable1)
ss.Update()
ss.SetInputData(subTable2)
ss.Update()
ss.SetInputData(subTable3)
ss.Update()
ss.SetInputData(subTable4)
ss.Update()
sStats = ss.GetOutputDataObject( 1 )
sPrimary = sStats.GetBlock( 0 )
sDerived = sStats.GetBlock( 1 )
sPrimary.Dump( 15 )
sDerived.Dump( 15 )
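# Note (added for illustration): the streaming primary/derived tables dumped
# above should match the offline results computed earlier in a single pass,
# which is what this example is meant to demonstrate.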
|
collects/VTK
|
Examples/Infovis/Python/streaming_statistics.py
|
Python
|
bsd-3-clause
| 3,118
|
[
"VTK"
] |
d195ce278272a045b36090f62cbc074d356df3b75ebb308e999ac4b270d77433
|
#!/usr/bin/env python
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Rob Knight", "Greg Caporaso", "Kyle Bittinger", "Jens Reeder",
"William Walters", "Jose Carlos Clemente Litran",
"Adam Robbins-Pianka", "Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
"""Contains code for OTU picking, using several techniques.
This module has the responsibility for taking a set of sequences and
grouping those sequences by similarity.
"""
from copy import copy
from itertools import ifilter
from os.path import splitext, split, abspath, join, dirname
from os import makedirs, close, rename
from itertools import imap
from tempfile import mkstemp
from bfillings.mothur import parse_otu_list as mothur_parse
from skbio.util import remove_files, flatten
from skbio.tree import CompressedTrie, fasta_to_pairlist
from skbio.parse.sequences import parse_fasta
from skbio.alignment import SequenceCollection
from skbio.sequence import DNA
from qiime.util import FunctionWithParams, get_qiime_temp_dir
from qiime.sort import sort_fasta_by_abundance
from qiime.parse import fields_to_dict
from bfillings.blast import blast_seqs, Blastall, BlastResult
from bfillings.formatdb import build_blast_db_from_fasta_path
from bfillings.mothur import Mothur
from bfillings.cd_hit import cdhit_clusters_from_seqs
from bfillings.uclust import get_clusters_from_fasta_filepath
from bfillings.sortmerna_v2 import (build_database_sortmerna,
sortmerna_ref_cluster)
from bfillings.usearch import (usearch_qf,
usearch61_denovo_cluster,
usearch61_ref_cluster)
from bfillings.sumaclust_v1 import sumaclust_denovo_cluster
from bfillings.swarm_v127 import swarm_denovo_cluster
class OtuPicker(FunctionWithParams):
"""An OtuPicker dereplicates a set of sequences at a given similarity.
This is an abstract class: subclasses should implement the __call__
method.
"""
Name = 'OtuPicker'
def __init__(self, params):
"""Return new OtuPicker object with specified params.
Note: expect params to contain both generic and per-method (e.g. for
cdhit) params, so leaving it as a dict rather than setting
attributes. Some standard entries in params are:
Algorithm: algorithm used (e.g. nearest-neighbor, furthest-neighbor)
Similarity: similarity threshold, e.g. 0.97
Application: 3rd-party application used, if any, e.g. cdhit
"""
self.Params = params
def __call__(self, seq_path, result_path=None, log_path=None):
"""Returns dict mapping {otu_id:[seq_ids]} for each otu.
Parameters:
seq_path: path to file of sequences
result_path: path to file of results. If specified, should
dump the result to the desired path instead of returning it.
log_path: path to log, which should include dump of params.
"""
raise NotImplementedError("OtuPicker is an abstract class")
def _prefilter_exact_prefixes(self, seqs, prefix_length=100):
"""
"""
unique_prefixes = {}
for seq_id, seq in seqs:
seq_len = len(seq)
seq_id = seq_id.split()[0]
current_prefix = seq[:prefix_length]
try:
prefix_data = unique_prefixes[current_prefix]
if seq_len > prefix_data[2]:
# if this is the longest seq with this prefix so far,
# update the list of seq_ids, the best seq_len, and the
# best hit seq_id
prefix_data[0].append(seq_id)
prefix_data[1] = seq_id
prefix_data[2] = seq_len
prefix_data[3] = seq
else:
                    # if a longer seq with this prefix was already seen,
                    # only update the list of seq_ids
prefix_data[0].append(seq_id)
except KeyError:
# list of seq_ids mapped to this prefix, best hit seq_id, best
# hit seq_len
unique_prefixes[current_prefix] = [[seq_id],
seq_id,
seq_len,
seq]
# construct the result objects
filtered_seqs = []
seq_id_map = {}
for data in unique_prefixes.values():
filtered_seqs.append((data[1], data[3]))
seq_id_map[data[1]] = data[0]
return filtered_seqs, seq_id_map
def _prefilter_exact_matches(self, seqs):
"""
"""
unique_sequences = {}
seq_id_map = {}
filtered_seqs = []
for seq_id, seq in seqs:
seq_id = seq_id.split()[0]
try:
temp_seq_id = unique_sequences[seq]
except KeyError:
# unseen sequence so create a new temp_seq_id,
# a new unique_sequence entry, and new seq_id_map
# entry, and add the sequence to the list of
# filtered seqs -- this will retain the order
# of the input sequences too
temp_seq_id = 'QiimeExactMatch.%s' % seq_id
unique_sequences[seq] = temp_seq_id
seq_id_map[temp_seq_id] = []
filtered_seqs.append((temp_seq_id, seq))
seq_id_map[temp_seq_id].append(seq_id)
return filtered_seqs, seq_id_map
def _prefilter_with_trie(self, seq_path):
trunc_id = lambda a_b: (a_b[0].split()[0], a_b[1])
# get the prefix map
with open(seq_path, 'U') as seq_lines:
t = CompressedTrie(fasta_to_pairlist(imap(trunc_id,
parse_fasta(seq_lines))))
mapping = t.prefix_map
for key in mapping.keys():
mapping[key].append(key)
# collect the representative seqs
filtered_seqs = []
for (label, seq) in parse_fasta(open(seq_path)):
label = label.split()[0]
if label in mapping:
filtered_seqs.append((label, seq))
return filtered_seqs, mapping
def _map_filtered_clusters_to_full_clusters(self, clusters, filter_map):
"""
Input: clusters, a list of cluster lists
               filter_map, a dict keyed by the seq_ids that appear in
                           clusters, mapping each to the full list of
                           seq_ids whose FASTA sequences were collapsed
                           onto that representative
Output: an extended list of cluster lists
"""
results = []
for cluster in clusters:
full_cluster = []
for seq_id in cluster:
full_cluster += filter_map[seq_id]
results.append(full_cluster)
return results
class SortmernaV2OtuPicker(OtuPicker):
""" SortMeRNA-based version 2 OTU picker: clusters queries by their 'best'
alignment to a reference seed.
The 'best' alignment for a query is the one with:
1. the lowest E-value score (at most 1)
2. percent sequence identity greater than or equal to the OTU
similarity threshold (default in Params['similarity'] = 0.97)
3. percent query coverage greater than or equal to the OTU
coverage threshold (default in Params['coverage'] = 0.97)
"""
def __init__(self, params):
""" Return a new SortmernaV2OtuPicker object with specified params.
"""
OtuPicker.__init__(self, params)
def __call__(self, seq_path, result_path=None, log_path=None,
sortmerna_db=None, refseqs_fp=None, failure_path=None):
""" Purpose : Call to construct the reference database (if not provided)
and to launch sortmerna.
Parameters: seq_path, path to reads file;
result_path, path to OTU mapping file;
log_path, path to QIIME log file;
sortmerna_db, path to sortmerna indexed database;
refseqs_fp, path to reference sequences;
failure_path, path to text file of reads failing
to align with similarity & coverage thresholds;
Return : None (output is always written to file)
"""
self.log_lines = []
prefilter_identical_sequences =\
self.Params['prefilter_identical_sequences']
# Indexed database not provided, build it
if not sortmerna_db:
# write index to output directory
self.sortmerna_db, self.files_to_remove = \
build_database_sortmerna(abspath(refseqs_fp),
max_pos=self.Params['max_pos'],
output_dir=dirname(abspath(result_path)))
self.log_lines.append('Reference seqs fp (to build '
'sortmerna database): %s' %
abspath(refseqs_fp))
# Indexed database provided
else:
self.sortmerna_db = sortmerna_db
self.files_to_remove = []
self.log_lines.append('SortMeRNA database: %s' % self.sortmerna_db)
original_fasta_path = seq_path
# Collapse identical sequences to a new file
if prefilter_identical_sequences:
exact_match_id_map, seq_path =\
self._apply_identical_sequences_prefilter(seq_path)
# Call sortmerna for reference clustering
cluster_map, failures, smr_files_to_remove =\
sortmerna_ref_cluster(seq_path=seq_path,
sortmerna_db=self.sortmerna_db,
refseqs_fp=refseqs_fp,
result_path=result_path,
tabular=self.Params['blast'],
max_e_value=self.Params['max_e_value'],
similarity=self.Params['similarity'],
coverage=self.Params['coverage'],
threads=self.Params['threads'],
best=self.Params['best'],
HALT_EXEC=False)
# Remove temporary files
self.files_to_remove.extend(smr_files_to_remove)
remove_files(self.files_to_remove, error_on_missing=False)
# Expand identical sequences to create full OTU map
if prefilter_identical_sequences:
cluster_names = cluster_map.keys()
clusters = [cluster_map[c] for c in cluster_names]
clusters =\
self._map_filtered_clusters_to_full_clusters(
clusters, exact_match_id_map)
cluster_map = dict(zip(cluster_names, clusters))
# Expand failures
temp_failures = []
for fa in failures:
temp_failures.extend(exact_match_id_map[fa])
failures = temp_failures
self.log_lines.append('Num OTUs: %d' % len(cluster_map))
self.log_lines.append('Num failures: %d' % len(failures))
# Write failures to file
if failure_path is not None:
failure_file = open(failure_path, 'w')
failure_file.write('\n'.join(failures))
failure_file.write('\n')
failure_file.close()
# Write OTU map
if result_path:
# If the user provided a result_path, write the
# results to file with one tab-separated line per
# cluster (this will over-write the default SortMeRNA
# OTU map with the extended OTU map)
of = open(result_path, 'w')
for cluster_id, cluster in cluster_map.items():
of.write('%s\t%s\n' % (cluster_id, '\t'.join(cluster)))
of.close()
result = None
self.log_lines.append('Result path: %s\n' % result_path)
else:
# if the user did not provide a result_path, store
# the clusters in a dict of {otu_id:[seq_ids]}, where
# otu_id is arbitrary
result = cluster_map
self.log_lines.append('Result path: None, returned as dict.')
# Log the run
if log_path:
log_file = open(log_path, 'w')
self.log_lines = [str(self)] + self.log_lines
log_file.write('\n'.join(self.log_lines))
log_file.write('\n')
return result
def _apply_identical_sequences_prefilter(self, seq_path):
"""
Input : a filepath to input FASTA reads
Method: prepares and writes de-replicated reads
to a temporary FASTA file, calls
parent method to do the actual
de-replication
Return: exact_match_id_map, a dictionary storing
de-replicated amplicon ID as key and
all original FASTA IDs with identical
sequences as values;
unique_seqs_fp, filepath to FASTA file
holding only de-replicated sequences
"""
# Creating mapping for de-replicated reads
with open(seq_path, 'U') as s_path:
seqs_to_cluster, exact_match_id_map =\
self._prefilter_exact_matches(parse_fasta(s_path))
# Create temporary file for storing the de-replicated reads
fd, unique_seqs_fp = mkstemp(
prefix='SortMeRNAExactMatchFilter', suffix='.fasta')
close(fd)
self.files_to_remove.append(unique_seqs_fp)
# Write de-replicated reads to file
unique_seqs_f = open(unique_seqs_fp, 'w')
for seq_id, seq in seqs_to_cluster:
unique_seqs_f.write('>%s count=%d;\n%s\n' %
(seq_id,
len(exact_match_id_map[seq_id]),
seq))
unique_seqs_f.close()
# Clean up the seqs_to_cluster as we don't need
# it again
del(seqs_to_cluster)
return exact_match_id_map, unique_seqs_fp
class BlastOtuPicker(OtuPicker):
"""Blast-based OTU picker: clusters sequence by their 'best' blast hit.
The 'best blast hit' for a sequence is defined as the database
sequence which achieves the longest alignment with percent sequence
identity greater than or equal to the OTU similarity threshold
(default in Params['Similarity'] = 0.97). Database hits must have an
e-value threshold less than or equal to the max_e_value threshold
(default in Params['max_e_value'] as 1e-10).
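    A minimal usage sketch (added for illustration; the file paths are
    placeholders, not part of the original docstring):
        picker = BlastOtuPicker({'max_e_value': 1e-10, 'Similarity': 0.97})
        clusters = picker('seqs.fasta', refseqs_fp='refseqs.fasta')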
"""
def __init__(self, params):
"""Return new BlastOtuPicker object with specified params.
"""
_params = {'max_e_value': 1e-10,
'seqs_per_blast_run': 1000,
'Similarity': 0.97,
'min_aligned_percent': 0.50,
'blast_program': 'blastn',
'is_protein': False}
_params.update(params)
OtuPicker.__init__(self, _params)
def __call__(self, seq_path, result_path=None, log_path=None,
blast_db=None, refseqs_fp=None):
self.log_lines = []
if not blast_db:
self.blast_db, self.db_files_to_remove = \
build_blast_db_from_fasta_path(abspath(refseqs_fp),
is_protein=self.Params[
'is_protein'],
output_dir=get_qiime_temp_dir())
self.log_lines.append('Reference seqs fp (to build blast db): %s' %
abspath(refseqs_fp))
else:
self.blast_db = blast_db
self.db_files_to_remove = []
self.log_lines.append('Blast database: %s' % self.blast_db)
clusters, failures = self._cluster_seqs(parse_fasta(open(seq_path)))
self.log_lines.append('Num OTUs: %d' % len(clusters))
if result_path:
# if the user provided a result_path, write the
# results to file with one tab-separated line per
# cluster
of = open(result_path, 'w')
for cluster_id, cluster in clusters.items():
of.write('%s\t%s\n' % (cluster_id, '\t'.join(cluster)))
of.close()
result = None
self.log_lines.append('Result path: %s\n' % result_path)
else:
# if the user did not provide a result_path, store
# the clusters in a dict of {otu_id:[seq_ids]}, where
# otu_id is arbitrary
result = clusters
self.log_lines.append('Result path: None, returned as dict.')
if log_path:
# if the user provided a log file path, log the run
log_file = open(log_path, 'w')
self.log_lines = [str(self)] + self.log_lines
log_file.write('\n'.join(self.log_lines))
failures.sort()
log_file.write('Num failures: %d\n' % len(failures))
log_file.write('Failures: %s\n' % '\t'.join(failures))
remove_files(self.db_files_to_remove, error_on_missing=False)
# return the result (note this is None if the data was
# written to file)
return result
def _cluster_seqs(self, seqs):
"""
"""
# blast seqs seq_per_blast_run at a time
# Build object to keep track of the current set of sequences to be
        # blasted, and the results (i.e., seq_id -> (taxonomy, quality score)
# mapping)
seqs_per_blast_run = self.Params['seqs_per_blast_run']
current_seqs = []
result = {}
failures = []
# Iterate over the (seq_id, seq) pairs
for seq_id, seq in seqs:
# append the current seq_id,seq to list of seqs to be blasted
current_seqs.append((seq_id, seq))
# When there are self.SeqsPerBlastRun in the list, blast them
if len(current_seqs) == seqs_per_blast_run:
# update the result object
current_clusters, current_failures =\
self._blast_seqs(current_seqs)
result = self._update_cluster_map(result, current_clusters)
failures += current_failures
# reset the list of seqs to be blasted
current_seqs = []
# Cluster the remaining sequences
current_clusters, current_failures = self._blast_seqs(current_seqs)
result = self._update_cluster_map(result, current_clusters)
failures += current_failures
return result, failures
def _update_cluster_map(self, cluster_map, new_clusters):
for cluster_id, seq_ids in new_clusters.items():
try:
cluster_map[cluster_id] += seq_ids
except KeyError:
cluster_map[cluster_id] = seq_ids
return cluster_map
def _blast_seqs(self, seqs):
"""
"""
result = {}
failures = []
if not seqs:
return result, failures
# Get the blast hits with e-values less than self.Params['max_e_value']
# and percent identity greater than self.Params['Similarity']
blast_hits = get_blast_hits(seqs, self.blast_db,
max_e_value=self.Params['max_e_value'],
min_pct_identity=self.Params['Similarity'],
min_aligned_percent=self.Params[
'min_aligned_percent'],
blast_program=self.Params['blast_program'])
# Choose the longest alignment out of the acceptable blast hits --
# the result will therefore be the blast hit with at least
# self.Params['Similarity'] percent identity to the input sequence
seq_id_to_best_blast_hit = \
self._choose_longest_blast_hit(blast_hits)
for seq_id, blast_hit in seq_id_to_best_blast_hit.items():
if blast_hit is None:
failures.append(seq_id)
else:
cluster_id = blast_hit['SUBJECT ID']
try:
result[cluster_id].append(seq_id)
except KeyError:
result[cluster_id] = [seq_id]
return result, failures
def _choose_longest_blast_hit(self, blast_hits):
""" choose the longest blast match
This function assumes that the blast_hits below
self.Params['Similarity'] have already been filtered out,
and therefore the longest alignment is the best blast pick.
"""
result = {}
# iterate over the queries and their acceptable blast hits
for query, blast_hits in blast_hits.items():
choice = None
len_longest = 0
# iterate over the acceptable blast hits
for blast_hit in blast_hits:
# if the alignment is the longest we've seen so far (or
# the first), hold on to it as a possible best hit
len_current = blast_hit['ALIGNMENT LENGTH']
if len_current > len_longest:
choice = blast_hit
len_longest = len_current
query = query.split()[0] # get rid of spaces
result[query] = choice
return result
class BlastxOtuPicker(BlastOtuPicker):
"""Blastx-based OTU picker: clusters sequence by their 'best' blast hit.
The 'best blast hit' for a sequence is defined as the database
sequence which achieves the longest alignment with percent sequence
identity greater than or equal to the OTU similarity threshold
(default in Params['Similarity'] = 0.97). Database hits must have an
e-value threshold less than or equal to the max_e_value threshold
(default in Params['max_e_value'] as 1e-10).
"""
def __init__(self, params):
"""Return new BlastOtuPicker object with specified params.
"""
_params = {'max_e_value': 1e-3,
'seqs_per_blast_run': 1000,
'Similarity': 0.75,
'min_aligned_percent': 0.50,
'blast_program': 'blastx',
'is_protein': True}
_params.update(params)
OtuPicker.__init__(self, _params)
# START MOVE TO BLAST APP CONTROLLER
# The following two functions should be moved to the blast application
# controller. When that's done, qiime.assign_taxonomy needs to be updated
# to use these functions rather than the member functions which these
# are replicas of. Note that when moving to the blast app controller,
# tests should be extractable from test_assign_taxonomy.py.
# THIS FUNCTION SHOULD DO THE SeqsPerBlastRun splitting, would be _much_
# cleaner that way.
def get_blast_hits(seqs,
blast_db,
max_e_value=1e-10,
min_pct_identity=0.75,
min_aligned_percent=0.50,
blast_program='blastn'):
""" blast each seq in seqs against blast_db and retain good hits
"""
max_evalue = max_e_value
min_percent_identity = min_pct_identity
seq_ids = [s[0] for s in seqs]
result = {}
blast_result = blast_seqs(
seqs, Blastall, blast_db=blast_db,
params={'-p': blast_program, '-n': 'F'},
add_seq_names=False)
if blast_result['StdOut']:
lines = [x for x in blast_result['StdOut']]
blast_result = BlastResult(lines)
else:
return {}.fromkeys(seq_ids, [])
for seq_id, seq in seqs:
blast_result_id = seq_id.split()[0]
max_alignment_length = len(seq)
if blast_program == 'blastx':
# if this is a translated blast search, the max alignment
# length is the number of 3mers in seq
max_alignment_length /= 3
min_alignment_length = max_alignment_length * min_aligned_percent
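        # e.g. (illustrative) a 300 nt blastx query can align over at most
        # 100 residues, so with min_aligned_percent=0.50 hits spanning fewer
        # than 50 aligned positions are rejected below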
result[seq_id] = []
if blast_result_id in blast_result:
for e in blast_result[blast_result_id][0]:
if (float(e['E-VALUE']) <= max_evalue and
float(e['% IDENTITY']) / 100. >= min_percent_identity and
int(e['ALIGNMENT LENGTH']) >= min_alignment_length):
result[seq_id].append(e)
return result
# END MOVE TO BLAST APP CONTROLLER
class SumaClustOtuPicker(OtuPicker):
""" SumaClust is a de novo OTU picker, following the same clustering
algorithm as Uclust. It is open source and supports multithreading,
both SIMD and OpenMP.
    Clusters are formed at a given similarity threshold (default 0.97).
    If a query does not match any existing seed at this threshold,
    it becomes a new seed.
    Exact clustering (with parameter -e) assigns each query to the
    best-matching seed, rather than to the first seed that meets the
    similarity threshold.
"""
def __init__(self, params):
""" Return a new SumaClustOtuPicker object with specified params.
The defaults are set in the SumaClust API (see bfillings)
"""
OtuPicker.__init__(self, params)
def _apply_identical_sequences_prefilter(self, seq_path):
"""
Input : a filepath to input FASTA reads
Method: prepares and writes de-replicated reads
to a temporary FASTA file, calls
parent method to do the actual
de-replication
Return: exact_match_id_map, a dictionary storing
de-replicated amplicon ID as key and
all original FASTA IDs with identical
sequences as values;
unique_seqs_fp, filepath to FASTA file
holding only de-replicated sequences
"""
# creating mapping for de-replicated reads
seqs_to_cluster, exact_match_id_map =\
self._prefilter_exact_matches(parse_fasta(open(seq_path, 'U')))
# create temporary file for storing the de-replicated reads
fd, unique_seqs_fp = mkstemp(
prefix='SumaClustExactMatchFilter', suffix='.fasta')
close(fd)
self.files_to_remove.append(unique_seqs_fp)
# write de-replicated reads to file
unique_seqs_f = open(unique_seqs_fp, 'w')
for seq_id, seq in seqs_to_cluster:
unique_seqs_f.write('>%s count=%d;\n%s\n'
% (seq_id,
len(exact_match_id_map[seq_id]),
seq))
unique_seqs_f.close()
# clean up the seqs_to_cluster list as it can be big and we
# don't need it again
del(seqs_to_cluster)
return exact_match_id_map, unique_seqs_fp
def __call__(self, seq_path=None, result_path=None, log_path=None):
self.log_lines = []
self.files_to_remove = []
prefilter_identical_sequences =\
self.Params['prefilter_identical_sequences']
original_fasta_path = seq_path
        # Collapse identical sequences to a new file
if prefilter_identical_sequences:
exact_match_id_map, seq_path =\
self._apply_identical_sequences_prefilter(seq_path)
# Run SumaClust, return a dict of output files
clusters = sumaclust_denovo_cluster(
seq_path=abspath(seq_path),
result_path=abspath(result_path),
shortest_len=self.Params['l'],
similarity=self.Params['similarity'],
threads=self.Params['threads'],
exact=self.Params['exact'],
HALT_EXEC=False)
# Clean up any temp files that were created
remove_files(self.files_to_remove)
# Create file for expanded OTU map
if prefilter_identical_sequences:
clusters = self._map_filtered_clusters_to_full_clusters(
clusters, exact_match_id_map)
self.log_lines.append('Num OTUs: %d' % len(clusters))
# Add prefix ID to de novo OTUs
otu_id_prefix = self.Params['denovo_otu_id_prefix']
if otu_id_prefix is None:
clusters = dict(enumerate(clusters))
else:
clusters = dict(('%s%d' % (otu_id_prefix, i), c)
for i, c in enumerate(clusters))
if result_path:
# If the user provided a result_path, write the
# results to file with one tab-separated line per
# cluster (this will over-write the default SumaClust
# OTU map with the extended OTU map)
of = open(result_path, 'w')
for cluster_id, cluster in clusters.items():
of.write('%s\t%s\n' % (cluster_id, '\t'.join(cluster)))
of.close()
result = None
self.log_lines.append('Result path: %s\n' % result_path)
else:
# if the user did not provide a result_path, store
# the clusters in a dict of {otu_id:[seq_ids]}, where
# otu_id is arbitrary
result = clusters
self.log_lines.append('Result path: None, returned as dict.')
# Log the run
if log_path:
log_file = open(log_path, 'w')
self.log_lines.insert(0, str(self))
log_file.write('\n'.join(self.log_lines))
log_file.close()
return result
class SwarmOtuPicker(OtuPicker):
""" Swarm is a de novo OTU picker, an exact clustering method based
on a single-linkage algorithm. It is open source and supports
SSE2 multithreading.
Clusters are created by their local clustering threshold 'd',
which is computed as the number of nucleotide differences
(substitution, insertion or deletion) between two amplicons
in the optimal pairwise global alignment.
This class is compatible with Swarm v.1.2.7
"""
def __init__(self, params):
""" Return a new SwarmOtuPicker object with specified params.
The defaults are set in the Swarm API
"""
OtuPicker.__init__(self, params)
def __call__(self, seq_path=None, result_path=None, log_path=None):
self.log_lines = []
# Run Swarm, return a list of lists (clusters)
clusters = swarm_denovo_cluster(
seq_path=seq_path,
d=self.Params['resolution'],
threads=self.Params['threads'],
HALT_EXEC=False)
self.log_lines.append('Num OTUs: %d' % len(clusters))
# Add prefix ID to de novo OTUs
otu_id_prefix = self.Params['denovo_otu_id_prefix']
if otu_id_prefix is None:
clusters = dict(enumerate(clusters))
else:
clusters = dict(('%s%d' % (otu_id_prefix, i), c)
for i, c in enumerate(clusters))
if result_path:
# If the user provided a result_path, write the
# results to file with one tab-separated line per
# cluster
of = open(result_path, 'w')
for cluster_id, cluster in clusters.items():
of.write('%s\t%s\n' % (cluster_id, '\t'.join(cluster)))
of.close()
result = None
self.log_lines.append('Result path: %s\n' % result_path)
else:
# if the user did not provide a result_path, store
# the clusters in a dict of {otu_id:[seq_ids]}, where
# otu_id is arbitrary
result = clusters
self.log_lines.append('Result path: None, returned as dict.')
# Log the run
if log_path:
log_file = open(log_path, 'w')
self.log_lines.insert(0, str(self))
log_file.write('\n'.join(self.log_lines))
log_file.close()
return result
class PrefixSuffixOtuPicker(OtuPicker):
Name = 'PrefixSuffixOtuPicker'
def __init__(self, params):
"""Return new OtuPicker object with specified params.
params contains both generic and per-method (e.g. for
cdhit application controller) params.
Some generic entries in params are:
Algorithm: algorithm used
Similarity: similarity threshold, default 0.97, corresponding to
genus-level OTUs ('Similarity' is a synonym for the '-c' parameter
to the cd-hit application controllers)
Application: 3rd-party application used
"""
_params = {'Similarity': 0.97,
'Algorithm': 'Prefix/suffix exact matching'}
_params.update(params)
OtuPicker.__init__(self, _params)
def __call__(self, seq_path, result_path=None, log_path=None,
prefix_length=50, suffix_length=50):
"""Returns dict mapping {otu_id:[seq_ids]} for each otu.
Parameters:
seq_path: path to file of sequences
result_path: path to file of results. If specified,
dumps the result to the desired path instead of returning it.
log_path: path to log, which includes dump of params.
        prefix_length: number of 5' bases used when grouping sequences
        suffix_length: number of 3' bases used when grouping sequences
        Sequences whose first prefix_length and last suffix_length bases
        are identical are collapsed into the same OTU.
"""
log_lines = []
log_lines.append('Prefix length: %d' % prefix_length)
log_lines.append('Suffix length: %d' % suffix_length)
assert prefix_length >= 0, 'Prefix length (%d) must be >= 0' % prefix_length
assert suffix_length >= 0, 'Suffix length (%d) must be >= 0' % suffix_length
clusters = self._collapse_exact_matches(parse_fasta(open(seq_path)),
prefix_length, suffix_length)
log_lines.append('Num OTUs: %d' % len(clusters))
if result_path:
# if the user provided a result_path, write the
# results to file with one tab-separated line per
# cluster
of = open(result_path, 'w')
for i, cluster in enumerate(clusters):
of.write('%s\t%s\n' % (i, '\t'.join(cluster)))
of.close()
result = None
log_lines.append('Result path: %s' % result_path)
else:
# if the user did not provide a result_path, store
# the clusters in a dict of {otu_id:[seq_ids]}, where
# otu_id is arbitrary
result = dict(enumerate(clusters))
log_lines.append('Result path: None, returned as dict.')
if log_path:
# if the user provided a log file path, log the run
log_file = open(log_path, 'w')
log_lines = [str(self)] + log_lines
log_file.write('\n'.join(log_lines))
# return the result (note this is None if the data was
# written to file)
return result
def _build_seq_hash(self, seq, prefix_length, suffix_length):
""" Merge the prefix and suffix into a hash for the OTU
"""
len_seq = len(seq)
if len_seq <= prefix_length + suffix_length:
return seq
prefix = seq[:prefix_length]
suffix = seq[len_seq - suffix_length:]
return prefix + suffix
def _collapse_exact_matches(self, seqs, prefix_length, suffix_length):
""" Cluster sequences into sets with identical prefix/suffix
"""
cluster_map = {}
for seq_id, seq in seqs:
seq_id = seq_id.split()[0]
seq_hash = self._build_seq_hash(seq, prefix_length, suffix_length)
try:
cluster_map[seq_hash].append(seq_id)
except KeyError:
cluster_map[seq_hash] = [seq_id]
return cluster_map.values()
class TrieOtuPicker(OtuPicker):
Name = 'TrieOtuPicker'
def __init__(self, params):
"""Return new OtuPicker object with specified params.
params contains both generic and per-method (e.g. for
cdhit application controller) params.
Some generic entries in params are:
Algorithm: algorithm used
Similarity: similarity threshold, default 0.97, corresponding to
genus-level OTUs ('Similarity' is a synonym for the '-c' parameter
to the cd-hit application controllers)
Application: 3rd-party application used
"""
_params = {'Similarity': 0.97,
'Algorithm': 'Trie prefix or suffix matching',
'Reverse': False}
_params.update(params)
OtuPicker.__init__(self, _params)
def __call__(self, seq_path, result_path=None, log_path=None):
"""Returns dict mapping {otu_id:[seq_ids]} for each otu.
Parameters:
seq_path: path to file of sequences
result_path: path to file of results. If specified,
dumps the result to the desired path instead of returning it.
log_path: path to log, which includes dump of params.
"""
log_lines = []
# Get the appropriate sequence iterator
if self.Params['Reverse']:
# Reverse the sequences prior to building the prefix map.
# This effectively creates a suffix map.
# Also removes descriptions from seq identifier lines
seqs = imap(lambda s: (s[0].split()[0], s[1][::-1]),
parse_fasta(open(seq_path)))
log_lines.append(
'Seqs reversed for suffix mapping (rather than prefix mapping).')
else:
# remove descriptions from seq identifier lines
seqs = imap(lambda s: (s[0].split()[0], s[1]),
parse_fasta(open(seq_path)))
# Build the mapping
t = CompressedTrie(fasta_to_pairlist(seqs))
mapping = t.prefix_map
log_lines.append('Num OTUs: %d' % len(mapping))
if result_path:
# if the user provided a result_path, write the
# results to file with one tab-separated line per
# cluster
of = open(result_path, 'w')
for i, (otu_id, members) in enumerate(mapping.iteritems()):
of.write('%s\t%s\n' % (i, '\t'.join([otu_id] + members)))
of.close()
result = None
log_lines.append('Result path: %s' % result_path)
else:
# if the user did not provide a result_path, store
# the clusters in a dict of {otu_id:[seq_ids]}, where
# otu_id is arbitrary
# add key to cluster_members
for key in mapping.keys():
mapping[key].append(key)
result = dict(enumerate(mapping.values()))
log_lines.append('Result path: None, returned as dict.')
if log_path:
# if the user provided a log file path, log the run
log_file = open(log_path, 'w')
log_lines = [str(self)] + log_lines
log_file.write('\n'.join(log_lines))
# return the result (note this is None if the data was
# written to file)
return result
class CdHitOtuPicker(OtuPicker):
Name = 'CdHitOtuPicker'
def __init__(self, params):
"""Return new OtuPicker object with specified params.
params contains both generic and per-method (e.g. for
cdhit application controller) params.
Some generic entries in params are:
Algorithm: algorithm used
Similarity: similarity threshold, default 0.97, corresponding to
genus-level OTUs ('Similarity' is a synonym for the '-c' parameter
to the cd-hit application controllers)
Application: 3rd-party application used
"""
_params = {'Similarity': 0.97,
'Application': 'cdhit',
'Algorithm': 'cdhit: "longest-sequence-first list removal algorithm"'}
_params.update(params)
OtuPicker.__init__(self, _params)
def __call__(self, seq_path, result_path=None, log_path=None,
id_len=0, prefix_prefilter_length=None, trie_prefilter=False):
"""Returns dict mapping {otu_id:[seq_ids]} for each otu.
Parameters:
seq_path: path to file of sequences
result_path: path to file of results. If specified,
dumps the result to the desired path instead of returning it.
log_path: path to log, which includes dump of params.
id_len: if set, truncates ids to n chars (you don't want this!)
prefix_prefilter_length: prefilters the sequence collection so
sequences whose first prefix_prefilter_length characters are
identical will automatically be grouped into the same OTU [off by
default, 100 is typically a good value if this filtering is
desired] -- useful for large sequence collections, when cdhit doesn't
scale well
trie_prefilter: prefilter the sequence collection such that all sequences
which are a prefix of another sequence are clustered with the other sequence.
        Together with cd-hit this is a non-heuristic filter that reduces run time a lot.
Still a bit slower than the prefix_prefilter toggled with prefix_prefilter_length.
"""
log_lines = []
# create the params dict to pass to cd-hit-est -- IS THERE A
# BETTER WAY TO MAKE self.Params INTO THE params DICT TO PASS
# TO THE APPLICATION CONTROLLERS?
cd_hit_params = copy(self.Params)
del cd_hit_params['Application']
del cd_hit_params['Algorithm']
cd_hit_params['-d'] = id_len # turn off id truncation
cd_hit_params['-g'] = "1"
if (prefix_prefilter_length is not None and trie_prefilter):
log_lines.append("Both prefilters selected. Deactivate trie_prefilter")
trie_prefilter = False
if prefix_prefilter_length is not None:
log_lines.append(
'Prefix-based prefiltering, prefix length: %d'
% prefix_prefilter_length)
with open(seq_path) as seq_f:
seqs, filter_map = self._prefilter_exact_prefixes(
parse_fasta(seq_f, label_to_name=lambda x: x.split()[0]),
prefix_prefilter_length)
log_lines.append(
'Prefix-based prefiltering, post-filter num seqs: %d' % len(seqs))
elif trie_prefilter:
log_lines.append(
'Trie-based prefiltering')
seqs, filter_map = self._prefilter_with_trie(seq_path)
log_lines.append(
'Trie-based prefiltering, post-filter num seqs: %d' % len(seqs))
else:
log_lines.append('No prefix-based prefiltering.')
# Load the seq path. Right now, cdhit_clusters_from_seqs
# doesn't support being passed a file path even though the
# seqs do get written to a fasta file before being passed
# to cd-hit-est. We may want to change that in the future
# to avoid the overhead of loading large sequence collections
# during this step.
with open(seq_path) as seq_f:
seqs = SequenceCollection.from_fasta_records(
parse_fasta(seq_f, label_to_name=lambda x: x.split()[0]),
DNA)
seqs = dict(seqs.iteritems())
# Get the clusters by running cd-hit-est against the
# sequence collection
clusters = cdhit_clusters_from_seqs(
seqs=seqs, params=cd_hit_params)
if prefix_prefilter_length is not None or trie_prefilter:
clusters = self._map_filtered_clusters_to_full_clusters(
clusters, filter_map)
if result_path:
# if the user provided a result_path, write the
# results to file with one tab-separated line per
# cluster
of = open(result_path, 'w')
for i, cluster in enumerate(clusters):
of.write('%s\t%s\n' % (i, '\t'.join(cluster)))
of.close()
result = None
log_lines.append('Result path: %s' % result_path)
else:
# if the user did not provide a result_path, store
# the clusters in a dict of {otu_id:[seq_ids]}, where
# otu_id is arbitrary
result = dict(enumerate(clusters))
log_lines.append('Result path: None, returned as dict.')
if log_path:
# if the user provided a log file path, log the run
log_file = open(log_path, 'w')
log_lines = [str(self)] + log_lines
log_file.write('\n'.join(log_lines))
# return the result (note this is None if the data was
# written to file)
return result
class UclustOtuPickerBase(OtuPicker):
def _presort_by_abundance(self, seq_path):
""" Preform pre-sorting of input by abundance """
# Turn off uclust's sorting - if doing our presort by
# abundance we _always_ need to disable uclust's sorting.
self.Params['suppress_sort'] = True
# Get a temp file name for the sorted fasta file
fd, sorted_input_seqs_filepath = \
mkstemp(prefix=self.Name, suffix='.fasta')
close(fd)
# Sort input seqs by abundance, and write to the temp
# file
sort_fasta_by_abundance(open(seq_path, 'U'),
open(sorted_input_seqs_filepath, 'w'))
# Return the sorted sequences filepath
return sorted_input_seqs_filepath
def _write_log(self, log_path, log_lines):
# if the user provided a log file path, log the run
log_file = open(log_path, 'w')
log_file.write('\n'.join([str(self)] + log_lines))
log_file.close()
def _prepare_results(self, result_path, clusters, log_lines):
"""
"""
if result_path:
# if the user provided a result_path, write the
# results to file with one tab-separated line per
# cluster
of = open(result_path, 'w')
for cluster_id, cluster in clusters:
of.write('%s\t%s\n' % (cluster_id, '\t'.join(cluster)))
of.close()
result = None
log_lines.append('Result path: %s' % result_path)
else:
# if the user did not provide a result_path, store
# the clusters in a dict of {otu_id:[seq_ids]}, where
# otu_id is arbitrary
result = dict(clusters)
log_lines.append('Result path: None, returned as dict.')
return result
def _apply_identical_sequences_prefilter(self, seq_path):
""" """
fd, unique_seqs_fp = mkstemp(
prefix='UclustExactMatchFilter', suffix='.fasta')
close(fd)
seqs_to_cluster, exact_match_id_map =\
self._prefilter_exact_matches(parse_fasta(open(seq_path, 'U')))
self.files_to_remove.append(unique_seqs_fp)
unique_seqs_f = open(unique_seqs_fp, 'w')
for seq_id, seq in seqs_to_cluster:
unique_seqs_f.write('>%s\n%s\n' % (seq_id, seq))
unique_seqs_f.close()
# clean up the seqs_to_cluster list as it can be big and we
# don't need it again
# del(seqs_to_cluster)
return exact_match_id_map, unique_seqs_fp
class UclustOtuPicker(UclustOtuPickerBase):
""" Uclust based OTU picker
Important note - the default behaviour of uclust is to ignore
sequences of 32 nucleotides or less. These will be omitted
in the clusters generated. """
Name = 'UclustOtuPicker'
def __init__(self, params):
"""Return new OtuPicker object with specified params.
params contains both generic and per-method (e.g. for
uclust application controller) params.
Some generic entries in params are:
Similarity: similarity threshold, default 0.97, corresponding to
genus-level OTUs ('Similarity' is a synonym for the '--id' parameter
to the uclust application controllers)
Application: 3rd-party application used
"""
_params = {'Similarity': 0.97,
'Application': 'uclust',
'max_accepts': 1,
'max_rejects': 8,
'stepwords': 8,
'word_length': 8,
'enable_rev_strand_matching': False,
'optimal': False,
'exact': False,
'suppress_sort': True,
'presort_by_abundance': True,
'new_cluster_identifier': None,
'stable_sort': True,
'save_uc_files': True,
'output_dir': '.',
'prefilter_identical_sequences': True}
_params.update(params)
OtuPicker.__init__(self, _params)
def __call__(self,
seq_path,
result_path=None,
log_path=None,
HALT_EXEC=False):
"""Returns dict mapping {otu_id:[seq_ids]} for each otu.
Parameters:
seq_path: path to file of sequences
result_path: path to file of results. If specified,
dumps the result to the desired path instead of returning it.
log_path: path to log, which includes dump of params.
"""
prefilter_identical_sequences =\
self.Params['prefilter_identical_sequences']
original_fasta_path = seq_path
self.files_to_remove = []
if self.Params['presort_by_abundance']:
# seq path will become the temporary sorted sequences
# filepath, to be cleaned up after the run
seq_path = self._presort_by_abundance(seq_path)
self.files_to_remove.append(seq_path)
# Collapse identical sequences to a new file
if prefilter_identical_sequences:
exact_match_id_map, seq_path =\
self._apply_identical_sequences_prefilter(seq_path)
# perform the clustering
clusters, failures, seeds = get_clusters_from_fasta_filepath(
seq_path,
original_fasta_path,
percent_ID=self.Params['Similarity'],
optimal=self.Params['optimal'],
exact=self.Params['exact'],
suppress_sort=self.Params['suppress_sort'],
enable_rev_strand_matching=
self.Params['enable_rev_strand_matching'],
max_accepts=self.Params['max_accepts'],
max_rejects=self.Params['max_rejects'],
stepwords=self.Params['stepwords'],
word_length=self.Params['word_length'],
stable_sort=self.Params['stable_sort'],
save_uc_files=self.Params['save_uc_files'],
output_dir=self.Params['output_dir'],
HALT_EXEC=HALT_EXEC)
# clean up any temp files that were created
remove_files(self.files_to_remove)
log_lines = []
log_lines.append('Num OTUs:%d' % len(clusters))
# expand identical sequences to create full OTU map
if prefilter_identical_sequences:
clusters = self._map_filtered_clusters_to_full_clusters(
clusters, exact_match_id_map)
otu_id_prefix = self.Params['new_cluster_identifier']
if otu_id_prefix is None:
clusters = enumerate(clusters)
else:
clusters = [('%s%d' % (otu_id_prefix, i), c)
for i, c in enumerate(clusters)]
result = self._prepare_results(result_path, clusters, log_lines)
if log_path:
self._write_log(log_path, log_lines)
# return the result (note this is None if the data was
# written to file)
return result
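# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal example of driving UclustOtuPicker directly; the paths
# 'seqs.fasta', 'otus.txt' and 'otus.log' are hypothetical placeholders.
#
#   picker = UclustOtuPicker({'Similarity': 0.97,
#                             'enable_rev_strand_matching': True})
#   otu_map = picker('seqs.fasta')                      # {otu_id: [seq_ids]}
#   picker('seqs.fasta', result_path='otus.txt', log_path='otus.log')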
class UsearchOtuPicker(UclustOtuPickerBase):
""" Usearch based OTU picker
"""
Name = 'UsearchOtuPicker'
def __init__(self, params):
"""Return new OtuPicker object with specified params.
params contains both generic and per-method (e.g. for
usearch application controller) params.
Some generic entries in params are:
Similarity: similarity threshold, default 0.97, corresponding to
genus-level OTUs ('Similarity' is a synonym for the '--id' parameter
to the uclust application controllers)
Application: 3rd-party application used
"""
_params = {
'percent_id': 0.97,
'percent_id_err': 0.97,
'Application': 'usearch',
'minsize': 4,
'abundance_skew': 2,
'db_filepath': None,
'rev': False,
'label_prefix': "",
'label_suffix': "",
'retain_label_as_comment': False,
'count_start': 0,
'perc_id_blast': 0.97,
'save_intermediate_files': False,
'global_alignment': True,
'sizein': True,
'sizeout': True,
'w': 64,
'slots': 16769023,
'maxrejects': 64,
'minlen': 64,
'de_novo_chimera_detection': True,
'reference_chimera_detection': True,
'cluster_size_filtering': True,
'output_dir': '.',
'remove_usearch_logs': False,
'derep_fullseq': False,
'chimeras_retention': 'union',
'verbose': False}
_params.update(params)
OtuPicker.__init__(self, _params)
def __call__(self,
seq_path,
output_dir='.',
log_path=None,
HALT_EXEC=False,
failure_path=None,
result_path=None):
"""Returns dict mapping {otu_id:[seq_ids]} for each otu, and a list
of seq ids that failed the filters.
Parameters:
seq_path: path to file of sequences
output_dir: directory to output results, including log files and
intermediate files if flagged for.
log_path: path to log, which includes dump of params.
"""
original_fasta_path = seq_path
self.files_to_remove = []
if self.Params['db_filepath'] is None:
db_fp = None
else:
db_fp = abspath(self.Params['db_filepath'])
# perform the filtering/clustering
clusters, failures = usearch_qf(
seq_path,
output_dir=self.Params['output_dir'],
percent_id=self.Params['percent_id'],
percent_id_err=self.Params['percent_id_err'],
minsize=self.Params['minsize'],
abundance_skew=self.Params['abundance_skew'],
db_filepath=db_fp,
rev=self.Params['rev'],
label_prefix=self.Params['label_prefix'],
label_suffix=self.Params['label_suffix'],
retain_label_as_comment=self.Params['retain_label_as_comment'],
count_start=self.Params['count_start'],
perc_id_blast=self.Params['perc_id_blast'],
save_intermediate_files=self.Params['save_intermediate_files'],
global_alignment=self.Params['global_alignment'],
sizein=self.Params['sizein'],
sizeout=self.Params['sizeout'],
w=self.Params['w'],
slots=self.Params['slots'],
maxrejects=self.Params['maxrejects'],
minlen=self.Params['minlen'],
de_novo_chimera_detection=self.Params[
'de_novo_chimera_detection'],
reference_chimera_detection=self.Params[
'reference_chimera_detection'],
cluster_size_filtering=self.Params['cluster_size_filtering'],
remove_usearch_logs=self.Params['remove_usearch_logs'],
derep_fullseq=self.Params['derep_fullseq'],
chimeras_retention=self.Params['chimeras_retention'],
verbose=self.Params['verbose'],
HALT_EXEC=HALT_EXEC)
# clean up any temp files that were created
remove_files(self.files_to_remove)
log_lines = []
log_lines.append('Num OTUs:%d' % len(clusters))
log_lines.append('Num failures:%d' % len(failures))
if failure_path:
failure_file = open(failure_path, 'w')
failure_file.write('\n'.join(failures))
failure_file.close()
if log_path:
self._write_log(log_path, log_lines)
if result_path:
result_out = open(result_path, "w")
for cluster_id in clusters:
result_out.write(cluster_id + "\t" +
"\t".join(clusters[cluster_id]) + '\n')
result = None
else:
result = clusters
return result
class UsearchReferenceOtuPicker(UclustOtuPickerBase):
""" Usearch reference based OTU picker
"""
Name = 'UsearchReferenceOtuPicker'
def __init__(self, params):
"""Return new OtuPicker object with specified params.
params contains both generic and per-method (e.g. for
usearch application controller) params.
Some generic entries in params are:
Similarity: similarity threshold, default 0.97, corresponding to
genus-level OTUs ('Similarity' is a synonym for the '--id' parameter
to the uclust application controllers)
Application: 3rd-party application used
"""
_params = {
'percent_id': 0.97,
'percent_id_err': 0.97,
'Application': 'usearch',
'minsize': 4,
'abundance_skew': 2,
'db_filepath': None,
'rev': False,
'label_prefix': "",
'label_suffix': "",
'retain_label_as_comment': False,
'count_start': 0,
'perc_id_blast': 0.97,
'save_intermediate_files': False,
'global_alignment': True,
'sizein': True,
'sizeout': True,
'w': 64,
'slots': 16769023,
'maxrejects': 64,
'minlen': 64,
'de_novo_chimera_detection': True,
'reference_chimera_detection': True,
'cluster_size_filtering': True,
'output_dir': '.',
'remove_usearch_logs': False,
'suppress_new_clusters': False,
'derep_fullseq': False,
'chimeras_retention': 'union',
'verbose': False}
_params.update(params)
OtuPicker.__init__(self, _params)
def __call__(self,
seq_path,
refseqs_fp,
output_dir='.',
log_path=None,
HALT_EXEC=False,
failure_path=None,
result_path=None):
"""Returns dict mapping {otu_id:[seq_ids]} for each otu, and a list
of seq ids that failed the filters.
Parameters:
seq_path: path to file of sequences
output_dir: directory to output results, including log files and
intermediate files if flagged for.
log_path: path to log, which includes dump of params.
"""
original_fasta_path = seq_path
self.files_to_remove = []
# perform the filtering/clustering
clusters, failures = usearch_qf(
seq_path,
refseqs_fp,
output_dir=self.Params['output_dir'],
percent_id=self.Params['percent_id'],
percent_id_err=self.Params['percent_id_err'],
minsize=self.Params['minsize'],
abundance_skew=self.Params['abundance_skew'],
db_filepath=self.Params['db_filepath'],
rev=self.Params['rev'],
label_prefix=self.Params['label_prefix'],
label_suffix=self.Params['label_suffix'],
retain_label_as_comment=self.Params['retain_label_as_comment'],
count_start=self.Params['count_start'],
perc_id_blast=self.Params['perc_id_blast'],
save_intermediate_files=self.Params['save_intermediate_files'],
global_alignment=self.Params['global_alignment'],
sizein=self.Params['sizein'],
sizeout=self.Params['sizeout'],
w=self.Params['w'],
slots=self.Params['slots'],
maxrejects=self.Params['maxrejects'],
minlen=self.Params['minlen'],
de_novo_chimera_detection=self.Params[
'de_novo_chimera_detection'],
reference_chimera_detection=self.Params[
'reference_chimera_detection'],
cluster_size_filtering=self.Params['cluster_size_filtering'],
remove_usearch_logs=self.Params['remove_usearch_logs'],
suppress_new_clusters=self.Params['suppress_new_clusters'],
derep_fullseq=self.Params['derep_fullseq'],
chimeras_retention=self.Params['chimeras_retention'],
verbose=self.Params['verbose'],
HALT_EXEC=HALT_EXEC)
# clean up any temp files that were created
remove_files(self.files_to_remove)
log_lines = []
log_lines.append('Num OTUs:%d' % len(clusters))
log_lines.append('Num failures:%d' % len(failures))
log_lines.append('Reference database for OTU picking: %s' %
abspath(refseqs_fp))
if failure_path is not None:
failure_file = open(failure_path, 'w')
failure_file.write('\n'.join(failures))
failure_file.close()
if log_path:
self._write_log(log_path, log_lines)
if result_path:
result_out = open(result_path, "w")
for cluster_id in clusters:
result_out.write(cluster_id + "\t" +
"\t".join(clusters[cluster_id]) + '\n')
result = None
else:
result = clusters
return result
class Usearch610DeNovoOtuPicker(UclustOtuPickerBase):
""" Usearch based OTU picker, de novo clustering only
"""
Name = 'Usearch610DeNovoOtuPicker'
def __init__(self, params):
"""Return new OtuPicker object with specified params.
params contains both generic and per-method (e.g. for
usearch61 application controller) params.
Some generic entries in params are:
percent_id: similarity threshold, default 0.97, corresponding to
genus-level OTUs ('Similarity' is a synonym for the '--id' parameter
to the uclust application controllers)
Application: 3rd-party application used
"""
_params = {
'percent_id': 0.97,
'Application': 'usearch61',
'rev': False,
'save_intermediate_files': False,
'minlen': 64,
'output_dir': '.',
'remove_usearch_logs': False,
'verbose': False,
'wordlength': 8,
'usearch_fast_cluster': False,
'usearch61_sort_method': 'abundance',
'usearch61_maxrejects': 32,
'usearch61_maxaccepts': 1,
'sizeorder': False,
'threads': 1.0
}
_params.update(params)
OtuPicker.__init__(self, _params)
def __call__(self,
seq_path,
output_dir='.',
log_path=None,
HALT_EXEC=False,
result_path=None,
otu_prefix="denovo"):
"""Returns dict mapping {otu_id:[seq_ids]} for each otu
Parameters:
seq_path: path to file of sequences
output_dir: directory to output results, including log files and
intermediate files if flagged for.
log_path: path to log, which includes dump of params.
HALT_EXEC: Setting for halting execution of application controller,
should only be True when debugging.
result_path: If supplied will write out results (e.g. OTU mapping file),
otherwise a dict is returned with data.
"""
# perform de novo clustering
clusters = usearch61_denovo_cluster(
seq_path,
percent_id=self.Params['percent_id'],
rev=self.Params['rev'],
save_intermediate_files=self.Params['save_intermediate_files'],
minlen=self.Params['minlen'],
output_dir=self.Params['output_dir'],
remove_usearch_logs=self.Params['remove_usearch_logs'],
verbose=self.Params['verbose'],
wordlength=self.Params['wordlength'],
usearch_fast_cluster=self.Params['usearch_fast_cluster'],
usearch61_sort_method=self.Params['usearch61_sort_method'],
otu_prefix=otu_prefix,
usearch61_maxrejects=self.Params['usearch61_maxrejects'],
usearch61_maxaccepts=self.Params['usearch61_maxaccepts'],
sizeorder=self.Params['sizeorder'],
threads=self.Params['threads'],
HALT_EXEC=HALT_EXEC
)
log_lines = []
log_lines.append('Num OTUs:%d' % len(clusters))
if log_path:
self._write_log(log_path, log_lines)
if result_path:
result_out = open(result_path, "w")
for cluster_id in clusters:
result_out.write(cluster_id + "\t" +
"\t".join(clusters[cluster_id]) + '\n')
result_out.close()
result = None
else:
result = clusters
return result
class Usearch61ReferenceOtuPicker(UclustOtuPickerBase):
""" Usearch based OTU picker, supports closed or open reference OTU picking
"""
Name = 'Usearch61ReferenceOtuPicker'
def __init__(self, params):
"""Return new OtuPicker object with specified params.
params contains both generic and per-method (e.g. for
usearch61 application controller) params.
Some generic entries in params are:
percent_id: similarity threshold, default 0.97, corresponding to
genus-level OTUs ('Similarity' is a synonym for the '--id' parameter
to the uclust application controllers)
Application: 3rd-party application used
"""
_params = {
'percent_id': 0.97,
'Application': 'usearch61',
'rev': False,
'save_intermediate_files': False,
'minlen': 64,
'output_dir': '.',
'remove_usearch_logs': False,
'verbose': False,
'wordlength': 8,
'usearch_fast_cluster': False,
'usearch61_sort_method': 'abundance',
'usearch61_maxrejects': 32,
'usearch61_maxaccepts': 1,
'sizeorder': False,
'suppress_new_clusters': False,
'threads': 1.0
}
_params.update(params)
OtuPicker.__init__(self, _params)
def __call__(self,
seq_path,
refseqs_fp,
output_dir='.',
log_path=None,
HALT_EXEC=False,
result_path=None,
failure_path=None,
otu_prefix="denovo"):
"""Returns dict mapping {otu_id:[seq_ids]} for each otu
Parameters:
seq_path: path to file of sequences
refseqs_fp: Reference database to pick OTUs against
output_dir: directory to output results, including log files and
intermediate files if flagged for.
log_path: path to log, which includes dump of params.
HALT_EXEC: Setting for halting execution of application controller,
should only be True when debugging.
result_path: If supplied will write out results (e.g. OTU mapping file),
otherwise a dict is returned with data.
"""
# perform reference clustering
clusters, failures = usearch61_ref_cluster(
seq_path,
refseqs_fp,
percent_id=self.Params['percent_id'],
rev=self.Params['rev'],
save_intermediate_files=self.Params['save_intermediate_files'],
minlen=self.Params['minlen'],
output_dir=self.Params['output_dir'],
remove_usearch_logs=self.Params['remove_usearch_logs'],
verbose=self.Params['verbose'],
wordlength=self.Params['wordlength'],
usearch_fast_cluster=self.Params['usearch_fast_cluster'],
usearch61_sort_method=self.Params['usearch61_sort_method'],
otu_prefix=otu_prefix,
usearch61_maxrejects=self.Params['usearch61_maxrejects'],
usearch61_maxaccepts=self.Params['usearch61_maxaccepts'],
sizeorder=self.Params['sizeorder'],
suppress_new_clusters=self.Params['suppress_new_clusters'],
threads=self.Params['threads'],
HALT_EXEC=HALT_EXEC
)
log_lines = []
log_lines.append('Num OTUs:%d' % len(clusters))
if log_path:
self._write_log(log_path, log_lines)
if result_path:
result_out = open(result_path, "w")
for cluster_id in clusters:
result_out.write(cluster_id + "\t" +
"\t".join(clusters[cluster_id]) + '\n')
result_out.close()
result = None
else:
result = clusters
if failure_path:
self._write_failures(failure_path, failures)
return result, failures
def _write_failures(self, failure_path, failures):
failure_file = open(failure_path, 'w')
failure_file.write('\n'.join(failures))
failure_file.close()
class UclustReferenceOtuPicker(UclustOtuPickerBase):
"""Uclust reference OTU picker: clusters seqs by match to ref collection
"""
def __init__(self, params):
"""Return new UclustReferenceOtuPicker object with specified params.
"""
_params = {'Similarity': 0.97,
'Application': 'uclust',
'enable_rev_strand_matching': False,
'max_accepts': 1,
'max_rejects': 8,
'stepwords': 8,
'word_length': 8,
'suppress_new_clusters': False,
'optimal': False,
'exact': False,
'suppress_sort': False,
'new_cluster_identifier': 'QiimeOTU',
'next_new_cluster_number': 1,
'presort_by_abundance': True,
'stable_sort': True,
'save_uc_files': True,
'output_dir': '.',
'prefilter_identical_sequences': True}
_params.update(params)
OtuPicker.__init__(self, _params)
def __call__(self,
seq_fp,
refseqs_fp,
next_new_cluster_number=None,
new_cluster_identifier=None,
result_path=None,
log_path=None,
failure_path=None,
HALT_EXEC=False):
original_fasta_path = seq_fp
prefilter_identical_sequences =\
self.Params['prefilter_identical_sequences']
if new_cluster_identifier:
self.Params['new_cluster_identifier'] = new_cluster_identifier
if next_new_cluster_number is not None:
self.Params['next_new_cluster_number'] = next_new_cluster_number
self.files_to_remove = []
if self.Params['presort_by_abundance']:
# seq path will become the temporary sorted sequences
# filepath, to be cleaned up after the run
seq_fp = self._presort_by_abundance(seq_fp)
self.files_to_remove.append(seq_fp)
# Collapse identical sequences to a new file
if prefilter_identical_sequences:
exact_match_id_map, seq_fp =\
self._apply_identical_sequences_prefilter(seq_fp)
# perform the clustering
cluster_map, failures, new_seeds = get_clusters_from_fasta_filepath(
seq_fp,
original_fasta_path,
subject_fasta_filepath=refseqs_fp,
percent_ID=self.Params['Similarity'],
enable_rev_strand_matching=self.Params[
'enable_rev_strand_matching'],
max_accepts=self.Params['max_accepts'],
max_rejects=self.Params['max_rejects'],
stepwords=self.Params['stepwords'],
word_length=self.Params['word_length'],
suppress_new_clusters=self.Params['suppress_new_clusters'],
optimal=self.Params['optimal'],
exact=self.Params['exact'],
suppress_sort=self.Params['suppress_sort'],
return_cluster_maps=True,
stable_sort=self.Params['stable_sort'],
save_uc_files=self.Params['save_uc_files'],
output_dir=self.Params['output_dir'],
HALT_EXEC=HALT_EXEC)
# expand identical sequences to create full OTU map
if prefilter_identical_sequences:
# expand the clusters (while retaining the names of
# the clusters so we know which are new OTUs and
# which are reference OTUs)
cluster_names = cluster_map.keys()
clusters = [cluster_map[c] for c in cluster_names]
clusters = self._map_filtered_clusters_to_full_clusters(
clusters, exact_match_id_map)
cluster_map = dict(zip(cluster_names, clusters))
# expand failures
temp_failures = []
for fa in failures:
temp_failures.extend(exact_match_id_map[fa])
failures = temp_failures
self._rename_clusters(cluster_map, new_seeds)
# clean up any temp files that were created
remove_files(self.files_to_remove)
log_lines = []
log_lines.append('Reference seqs:%s' % abspath(refseqs_fp))
log_lines.append('Num OTUs:%d' % len(cluster_map))
log_lines.append('Num new OTUs:%d' % len(new_seeds))
log_lines.append('Num failures:%d' % len(failures))
cluster_map = cluster_map.items()
result = self._prepare_results(result_path, cluster_map, log_lines)
if log_path:
self._write_log(log_path, log_lines)
if failure_path:
self._write_failures(failure_path, failures)
# return the result (note this is None if the data was
# written to file)
return result
def _rename_clusters(self, cluster_map, new_seeds):
""" """
next_new_cluster_number = self.Params['next_new_cluster_number']
new_cluster_identifier = self.Params['new_cluster_identifier']
new_seed_lookup = {}.fromkeys(new_seeds)
for seed, cluster in cluster_map.items():
del cluster_map[seed]
if seed in new_seed_lookup:
new_cluster_id = '%s%d' % (new_cluster_identifier,
next_new_cluster_number)
next_new_cluster_number += 1
else:
new_cluster_id = seed.split()[0]
cluster_map[new_cluster_id] = cluster
self.Params['next_new_cluster_number'] = next_new_cluster_number
def _write_failures(self, failure_path, failures):
# if the user provided a log file path, log the run
failure_file = open(failure_path, 'w')
failure_file.write('\n'.join(failures))
failure_file.close()
class MothurOtuPicker(OtuPicker):
Name = 'MothurOtuPicker'
ClusteringAlgorithms = ['furthest', 'nearest', 'average']
def __init__(self, params):
"""Return new MothurOtuPicker object with specified params.
Valid params are:
Algorithm
Algorithm used for clustering (valid choices are nearest,
furthest, average)
Similarity
Similarity threshold for OTUs (default 0.97)
"""
params['Application'] = 'mothur'
if 'Algorithm' not in params:
params['Algorithm'] = 'furthest'
if 'Similarity' not in params:
params['Similarity'] = 0.97
if params['Algorithm'] not in self.ClusteringAlgorithms:
raise ValueError('Unsupported algorithm %s. Choices are %s' %
(params['Algorithm'], self.ClusteringAlgorithms))
super(MothurOtuPicker, self).__init__(params)
def __call__(self, seq_path, result_path=None, log_path=None):
"""Returns dict mapping {otu_id:[seq_ids]} for each otu.
Parameters:
seq_path: path to file of sequences
result_path: path to file of results. If specified, should
dump the result to the desired path instead of returning it.
log_path: path to log, which should include dump of params.
"""
app = Mothur(
InputHandler='_input_as_path',
TmpDir=get_qiime_temp_dir())
app.Parameters['method'].on(self.Params['Algorithm'])
results = app(seq_path)
parsed_otus = mothur_parse(results['otu list'])
clusters = self.__pick_clusters(parsed_otus)
results.cleanUp()
# From here down, this is all copied straight from
# CdHitOtuPicker, and prime for refactoring into a private
# method of OtuPicker
if result_path:
# if the user provided a result_path, write the
# results to file with one tab-separated line per
# cluster
of = open(result_path, 'w')
for i, cluster in enumerate(clusters):
of.write('%s\t%s\n' % (i, '\t'.join(cluster)))
of.close()
result = None
log_str = 'Result path: %s' % result_path
else:
# if the user did not provide a result_path, store
# the clusters in a dict of {otu_id:[seq_ids]}, where
# otu_id is arbitrary
result = dict(enumerate(clusters))
log_str = 'Result path: None, returned as dict.'
if log_path:
# if the user provided a log file path, log the run
log_file = open(log_path, 'w')
log_file.write(str(self))
log_file.write('\n')
log_file.write('%s\n' % log_str)
# return the result (note this is None if the data was
# written to file)
return result
def __pick_clusters(self, mothur_results):
"""Returns OTU's that satisfy the given similarity threshold.
"""
# Sanity check
if not 0 <= self.Params['Similarity'] <= 1:
raise ValueError(
'Similarity threshold must be a number between 0 and 1 '
'(received %s)' % self.Params['Similarity'])
# A lower mothur score means more otu's. To find otu's that
# satisfy a similarity threshold of 0.9, we must find the
# largest score less than or equal to (1 - 0.9 =) 0.1.
score_threshold = 1 - self.Params['Similarity']
my_score, my_otus = mothur_results.next()
for score, otus in mothur_results:
# Sanity check
if score < my_score:
raise ValueError(
'Mothur results not in ascending order. This is an error '
'in the Mothur application controller, and it should be '
'reported to the PyCogent developers.')
if score <= score_threshold:
my_score, my_otus = score, otus
else:
# Scores are only getting larger, so bail out now
break
return my_otus
# Some functions to support merging OTU tables
# generated one after another. This functionality is currently available
# via Qiime/scripts/merge_otu_maps.py and will be incorporated into the
# MetaPickOtus or ChainedPickOtus class when that comes into existence.
def expand_otu_map_seq_ids(otu_map, seq_id_map):
for otu_id, seq_ids in otu_map.items():
mapped_seq_ids = flatten(
[seq_id_map[seq_id] for seq_id in seq_ids])
otu_map[otu_id] = mapped_seq_ids
return otu_map
def expand_failures(failures, seq_id_map):
result = []
for failure in failures:
failure = failure.strip()
result += seq_id_map[failure]
return result
def map_otu_map_files(otu_files, failures_file=None):
# passing delim=None splits on any whitespace, so can handle mixed tabs
# and spaces
result = fields_to_dict(otu_files[0], delim=None)
for otu_file in otu_files[1:]:
current_otu_map = fields_to_dict(otu_file, delim=None)
result = expand_otu_map_seq_ids(current_otu_map, result)
if failures_file:
result = expand_failures(failures_file, result)
return result
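# Illustrative usage (editor's sketch): chaining two OTU maps written by
# successive picking rounds. The file paths are hypothetical placeholders.
#
#   chained = map_otu_map_files([open('step1_otus.txt', 'U'),
#                                open('step2_otus.txt', 'U')])
#   # chained maps each step-2 OTU id to the original step-1 sequence ids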
# End functions to support merging OTU tables
otu_picking_method_constructors = {
'cdhit': CdHitOtuPicker,
'prefix_suffix': PrefixSuffixOtuPicker,
'mothur': MothurOtuPicker,
'trie': TrieOtuPicker,
'blast': BlastOtuPicker,
'uclust': UclustOtuPicker,
'uclust_ref': UclustReferenceOtuPicker,
'usearch': UsearchOtuPicker,
'usearch_ref': UsearchReferenceOtuPicker,
'usearch61': Usearch610DeNovoOtuPicker,
'usearch61_ref': Usearch61ReferenceOtuPicker,
'sumaclust': SumaClustOtuPicker,
'sortmerna': SortmernaV2OtuPicker,
'swarm': SwarmOtuPicker
}
otu_picking_method_choices = otu_picking_method_constructors.keys()
|
adamrp/qiime
|
qiime/pick_otus.py
|
Python
|
gpl-2.0
| 82,387
|
[
"BLAST"
] |
06e4d4b00fc3857092582e4426e18fa7252474661ca9ed27c9de0b22c066f6f0
|
from setuptools import setup
setup(
name='mercuro',
version='0.1.1',
author='Brian Cline',
author_email='brian.cline@gmail.com',
description=('A simple daemon that listens for syslog events and '
'forwards them to a Riemann server.'),
license = 'Apache',
keywords = 'syslog rsyslog riemann logging',
url = 'https://github.com/briancline/mercuro',
packages=['mercuro'],
long_description=open('README.md').read(),
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
],
)
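# Editor's sketch (not part of the original file): with this setup.py in place,
# the package can be built and installed in the standard setuptools/pip way:
#   python setup.py sdist   # build a source distribution
#   pip install .           # install into the current environment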
|
briancline/mercuro
|
setup.py
|
Python
|
apache-2.0
| 833
|
[
"Brian"
] |
b9a918633c2b538bc96cc6e523e4b3d176f6186c1de1abc113254fe866805ae9
|
import unittest
from pycparser import parse_file
import minic.c_ast_to_minic as ctoc
import minic.minic_ast as mast
class TestVisitor(mast.NodeVisitor):
def __init__(self):
self.assignment_counter = 0
self.forl_counter = 0
def visit_Assignment(self, assignment):
self.assignment_counter += 1
def visit_For(self, forl):
self.forl_counter += 1
self.generic_visit(forl)
class TestNodeVisit(unittest.TestCase):
def test_visit(self):
ast = ctoc.transform(parse_file('./c_files/minic.c'))
vs = TestVisitor()
vs.visit(ast)
self.assertEqual(vs.assignment_counter, 5)
self.assertEqual(vs.forl_counter, 1)
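# Editor's sketch (assumption, not in the original file): a __main__ guard
# would let the test module run standalone under the stock unittest runner.
#
# if __name__ == '__main__':
#     unittest.main()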
|
martylee/Python
|
CSC410-Project-1-master/tests/test_nodevisitors.py
|
Python
|
gpl-2.0
| 699
|
[
"VisIt"
] |
1be42e637460301662fc42689984fa7ad12e6ed8304e46ce7a89543cf5099a64
|
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt or ce, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix', 'nt' or 'ce'.
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
import stat as st
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
"SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
"popen", "extsep"]
def _exists(name):
return name in globals()
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
# Any new dependencies of the os module and/or changes in path separator
# requires updating importlib as well.
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
__all__.append('_exit')
except ImportError:
pass
import posixpath as path
try:
from posix import _have_functions
except ImportError:
pass
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
__all__.append('_exit')
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
try:
from nt import _have_functions
except ImportError:
pass
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
__all__.append('_exit')
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
try:
from ce import _have_functions
except ImportError:
pass
else:
raise ImportError('no os specific module found')
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
if _exists("_have_functions"):
_globals = globals()
def _add(str, fn):
if (fn in _globals) and (str in _have_functions):
_set.add(_globals[fn])
_set = set()
_add("HAVE_FACCESSAT", "access")
_add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_FUTIMESAT", "utime")
_add("HAVE_LINKAT", "link")
_add("HAVE_MKDIRAT", "mkdir")
_add("HAVE_MKFIFOAT", "mkfifo")
_add("HAVE_MKNODAT", "mknod")
_add("HAVE_OPENAT", "open")
_add("HAVE_READLINKAT", "readlink")
_add("HAVE_RENAMEAT", "rename")
_add("HAVE_SYMLINKAT", "symlink")
_add("HAVE_UNLINKAT", "unlink")
_add("HAVE_UNLINKAT", "rmdir")
_add("HAVE_UTIMENSAT", "utime")
supports_dir_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
supports_effective_ids = _set
_set = set()
_add("HAVE_FCHDIR", "chdir")
_add("HAVE_FCHMOD", "chmod")
_add("HAVE_FCHOWN", "chown")
_add("HAVE_FDOPENDIR", "listdir")
_add("HAVE_FEXECVE", "execve")
_set.add(stat) # fstat always works
_add("HAVE_FTRUNCATE", "truncate")
_add("HAVE_FUTIMENS", "utime")
_add("HAVE_FUTIMES", "utime")
_add("HAVE_FPATHCONF", "pathconf")
if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
_add("HAVE_FSTATVFS", "statvfs")
supports_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
# Some platforms don't support lchmod(). Often the function exists
# anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP.
# (No, I don't know why that's a good design.) ./configure will detect
# this and reject it--so HAVE_LCHMOD still won't be defined on such
# platforms. This is Very Helpful.
#
# However, sometimes platforms without a working lchmod() *do* have
# fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15,
# OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes
# it behave like lchmod(). So in theory it would be a suitable
# replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s
# flag doesn't work *either*. Sadly ./configure isn't sophisticated
# enough to detect this condition--it only determines whether or not
# fchmodat() minimally works.
#
# Therefore we simply ignore fchmodat() when deciding whether or not
# os.chmod supports follow_symlinks. Just checking lchmod() is
# sufficient. After all--if you have a working fchmodat(), your
# lchmod() almost certainly works too.
#
# _add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_LCHFLAGS", "chflags")
_add("HAVE_LCHMOD", "chmod")
if _exists("lchown"): # mac os x10.3
_add("HAVE_LCHOWN", "chown")
_add("HAVE_LINKAT", "link")
_add("HAVE_LUTIMES", "utime")
_add("HAVE_LSTAT", "stat")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_UTIMENSAT", "utime")
_add("MS_WINDOWS", "stat")
supports_follow_symlinks = _set
del _set
del _have_functions
del _globals
del _add
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
def _get_masked_mode(mode):
mask = umask(0)
umask(mask)
return mode & ~mask
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(path [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. If the
target directory with the same mode as we specified already exists,
raises an OSError if exist_ok is False, otherwise no exception is
raised. This is recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode, exist_ok)
except FileExistsError:
# be happy if someone already created the path
pass
cdir = curdir
if isinstance(tail, bytes):
cdir = bytes(curdir, 'ASCII')
if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists
return
try:
mkdir(name, mode)
except OSError as e:
dir_exists = path.isdir(name)
expected_mode = _get_masked_mode(mode)
if dir_exists:
# S_ISGID is automatically copied by the OS from parent to child
# directories on mkdir. Don't consider it being set to be a mode
# mismatch as mkdir does not unset it when not specified in mode.
actual_mode = st.S_IMODE(lstat(name).st_mode) & ~st.S_ISGID
else:
actual_mode = -1
if not (e.errno == errno.EEXIST and exist_ok and dir_exists and
actual_mode == expected_mode):
if dir_exists and actual_mode != expected_mode:
e.strerror += ' (mode %o != expected mode %o)' % (
actual_mode, expected_mode)
raise
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except OSError:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except OSError:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an OSError instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([getsize(join(root, name)) for name in files]), end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
islink, join, isdir = path.islink, path.join, path.isdir
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir is global in this module due
# to earlier import-*.
names = listdir(top)
except OSError as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
yield from walk(new_path, topdown, onerror, followlinks)
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd:
def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
"""Directory tree generator.
This behaves exactly like walk(), except that it yields a 4-tuple
dirpath, dirnames, filenames, dirfd
`dirpath`, `dirnames` and `filenames` are identical to walk() output,
and `dirfd` is a file descriptor referring to the directory `dirpath`.
The advantage of fwalk() over walk() is that it's safe against symlink
races (when follow_symlinks is False).
If dir_fd is not None, it should be a file descriptor open to a directory,
and top should be relative; top will then be relative to that directory.
(dir_fd is always supported for fwalk.)
Caution:
Since fwalk() yields file descriptors, those are only valid until the
next iteration step, so you should dup() them if you want to keep them
for a longer period.
Example:
import os
for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
topfd = open(top, O_RDONLY, dir_fd=dir_fd)
try:
if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
path.samestat(orig_st, stat(topfd)))):
yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks)
finally:
close(topfd)
def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks):
# Note: This uses O(depth of the directory tree) file descriptors: if
# necessary, it can be adapted to only require O(1) FDs, see issue
# #13734.
names = listdir(topfd)
dirs, nondirs = [], []
for name in names:
try:
# Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with
# walk() which reports symlinks to directories as directories.
# We do however check for symlinks before recursing into
# a subdirectory.
if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode):
dirs.append(name)
else:
nondirs.append(name)
except FileNotFoundError:
try:
# Add dangling symlinks, ignore disappeared files
if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False)
.st_mode):
nondirs.append(name)
except FileNotFoundError:
continue
if topdown:
yield toppath, dirs, nondirs, topfd
for name in dirs:
try:
orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks)
dirfd = open(name, O_RDONLY, dir_fd=topfd)
except OSError as err:
if onerror is not None:
onerror(err)
return
try:
if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
dirpath = path.join(toppath, name)
yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks)
finally:
close(dirfd)
if not topdown:
yield toppath, dirs, nondirs, topfd
__all__.append("fwalk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
exec_func = execve
argrest = (args, env)
else:
exec_func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
exec_func(file, *argrest)
return
last_exc = saved_exc = None
saved_tb = None
path_list = get_exec_path(env)
if name != 'nt':
file = fsencode(file)
path_list = map(fsencode, path_list)
for dir in path_list:
fullname = path.join(dir, file)
try:
exec_func(fullname, *argrest)
except OSError as e:
last_exc = e
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise saved_exc.with_traceback(saved_tb)
raise last_exc.with_traceback(tb)
def get_exec_path(env=None):
"""Returns the sequence of directories that will be searched for the
named executable (similar to a shell) when launching a process.
*env* must be an environment variable dict or None. If *env* is None,
os.environ will be used.
"""
# Use a local import instead of a global import to limit the number of
# modules loaded at startup: the os module is always loaded at startup by
# Python. It may also avoid a bootstrap issue.
import warnings
if env is None:
env = environ
# {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
# BytesWarning when using python -b or python -bb: ignore the warning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BytesWarning)
try:
path_list = env.get('PATH')
except TypeError:
path_list = None
if supports_bytes_environ:
try:
path_listb = env[b'PATH']
except (KeyError, TypeError):
pass
else:
if path_list is not None:
raise ValueError(
"env cannot contain 'PATH' and b'PATH' keys")
path_list = path_listb
if path_list is not None and isinstance(path_list, bytes):
path_list = fsdecode(path_list)
if path_list is None:
path_list = defpath
return path_list.split(pathsep)
# Change environ to automatically call putenv(), unsetenv if they exist.
from _collections_abc import MutableMapping
class _Environ(MutableMapping):
def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
self.encodekey = encodekey
self.decodekey = decodekey
self.encodevalue = encodevalue
self.decodevalue = decodevalue
self.putenv = putenv
self.unsetenv = unsetenv
self._data = data
def __getitem__(self, key):
try:
value = self._data[self.encodekey(key)]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
return self.decodevalue(value)
def __setitem__(self, key, value):
key = self.encodekey(key)
value = self.encodevalue(value)
self.putenv(key, value)
self._data[key] = value
def __delitem__(self, key):
encodedkey = self.encodekey(key)
self.unsetenv(encodedkey)
try:
del self._data[encodedkey]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
def __iter__(self):
for key in self._data:
yield self.decodekey(key)
def __len__(self):
return len(self._data)
def __repr__(self):
return 'environ({{{}}})'.format(', '.join(
('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
for key, value in self._data.items())))
def copy(self):
return dict(self)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
try:
_putenv = putenv
except NameError:
_putenv = lambda key, value: None
else:
if "putenv" not in __all__:
__all__.append("putenv")
try:
_unsetenv = unsetenv
except NameError:
_unsetenv = lambda key: _putenv(key, "")
else:
if "unsetenv" not in __all__:
__all__.append("unsetenv")
def _createenviron():
if name == 'nt':
# Where Env Var Names Must Be UPPERCASE
def check_str(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value
encode = check_str
decode = str
def encodekey(key):
return encode(key).upper()
data = {}
for key, value in environ.items():
data[encodekey(key)] = value
else:
# Where Env Var Names Can Be Mixed Case
encoding = sys.getfilesystemencoding()
def encode(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value.encode(encoding, 'surrogateescape')
def decode(value):
return value.decode(encoding, 'surrogateescape')
encodekey = encode
data = environ
return _Environ(data,
encodekey, decode,
encode, decode,
_putenv, _unsetenv)
# unicode environ
environ = _createenviron()
del _createenviron
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str."""
return environ.get(key, default)
supports_bytes_environ = (name != 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
if supports_bytes_environ:
def _check_bytes(value):
if not isinstance(value, bytes):
raise TypeError("bytes expected, not %s" % type(value).__name__)
return value
# bytes environ
environb = _Environ(environ._data,
_check_bytes, bytes,
_check_bytes, bytes,
_putenv, _unsetenv)
del _check_bytes
def getenvb(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are bytes."""
return environb.get(key, default)
__all__.extend(("environb", "getenvb"))
def _fscodec():
encoding = sys.getfilesystemencoding()
if encoding == 'mbcs':
errors = 'strict'
else:
errors = 'surrogateescape'
def fsencode(filename):
"""
Encode filename to the filesystem encoding with 'surrogateescape' error
handler, return bytes unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, bytes):
return filename
elif isinstance(filename, str):
return filename.encode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
def fsdecode(filename):
"""
Decode filename from the filesystem encoding with 'surrogateescape' error
handler, return str unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, str):
return filename
elif isinstance(filename, bytes):
return filename.decode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
return fsencode, fsdecode
fsencode, fsdecode = _fscodec()
del _fscodec
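# Illustrative round trip (editor's sketch, assuming a UTF-8 filesystem encoding):
#   fsdecode(b'caf\xc3\xa9.txt')                 # -> 'café.txt'
#   fsencode('café.txt') == b'caf\xc3\xa9.txt'   # True under the same encoding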
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
__all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise OSError("Not stopped, signaled or exited???")
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
# Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
__all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"])
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnl", "spawnle"])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnlp", "spawnlpe"])
# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
if mode not in ("r", "w"):
raise ValueError("invalid mode %r" % mode)
if buffering == 0 or buffering is None:
raise ValueError("popen() does not support unbuffered streams")
import subprocess, io
if mode == "r":
proc = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
else:
proc = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdin), proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if name == 'nt':
return returncode
else:
return returncode << 8 # Shift left to match old behavior
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
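# Illustrative usage (editor's sketch, POSIX shell assumed): popen() returns a
# file-like wrapper whose close() reports the command's exit status.
#   with popen('echo hello') as f:
#       print(f.read())        # prints 'hello'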
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
if not isinstance(fd, int):
raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
import io
return io.open(fd, *args, **kwargs)
|
171121130/SWI
|
venv/Lib/os.py
|
Python
|
mit
| 34,425
|
[
"VisIt"
] |
8a9eb9c72489e99f4d20c3e9ca11b15aaaeadcc129db6b94cafb1df1fe4406f9
|
########################################################################
# File: RegisterOperation.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/03/19 13:55:14
########################################################################
""" :mod: RegisterFile
==================
.. module: RegisterFile
:synopsis: register operation handler
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
RegisterFile operation handler
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id $"
# #
# @file RegisterOperation.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/03/19 13:55:24
# @brief Definition of RegisterOperation class.
# # imports
from DIRAC import S_OK, S_ERROR
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.RequestManagementSystem.private.OperationHandlerBase import OperationHandlerBase
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.MonitoringSystem.Client.MonitoringReporter import MonitoringReporter
########################################################################
class RegisterFile(OperationHandlerBase):
"""
.. class:: RegisterOperation
RegisterFile operation handler
:param self: self reference
:param ~DIRAC.RequestManagementSystem.Client.Operation.Operation operation: Operation instance
:param str csPath: CS path for this handler
"""
def __init__(self, operation=None, csPath=None):
"""c'tor
"""
OperationHandlerBase.__init__(self, operation, csPath)
def __call__(self):
""" call me maybe """
# The flag 'rmsMonitoring' is set by the RequestTask and is False by default.
# Here we use 'createRMSRecord' to create the ES record which is defined inside OperationHandlerBase.
if self.rmsMonitoring:
self.rmsMonitoringReporter = MonitoringReporter(monitoringType="RMSMonitoring")
else:
# # RegisterFile specific monitor info
gMonitor.registerActivity("RegisterAtt", "Attempted file registrations",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
gMonitor.registerActivity("RegisterOK", "Successful file registrations",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
gMonitor.registerActivity("RegisterFail", "Failed file registrations",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
# # counter for failed files
failedFiles = 0
# # catalog(s) to use
catalogs = self.operation.Catalog
if catalogs:
catalogs = [cat.strip() for cat in catalogs.split(',')]
dm = DataManager(catalogs=catalogs)
# # get waiting files
waitingFiles = self.getWaitingFilesList()
if self.rmsMonitoring:
self.rmsMonitoringReporter.addRecord(
self.createRMSRecord("Attempted", len(waitingFiles))
)
# # loop over files
for opFile in waitingFiles:
if not self.rmsMonitoring:
gMonitor.addMark("RegisterAtt", 1)
# # get LFN
lfn = opFile.LFN
# # and others
fileTuple = (lfn, opFile.PFN, opFile.Size, self.operation.targetSEList[0], opFile.GUID, opFile.Checksum)
# # call DataManager
registerFile = dm.registerFile(fileTuple)
# # check results
if not registerFile["OK"] or lfn in registerFile["Value"]["Failed"]:
if self.rmsMonitoring:
self.rmsMonitoringReporter.addRecord(
self.createRMSRecord("Failed", 1)
)
else:
gMonitor.addMark("RegisterFail", 1)
# self.dataLoggingClient().addFileRecord(
# lfn, "RegisterFail", ','.join(catalogs) if catalogs else "all catalogs", "", "RegisterFile")
reason = str(registerFile.get("Message", registerFile.get("Value", {}).get("Failed", {}).get(lfn, 'Unknown')))
errorStr = "failed to register LFN"
opFile.Error = "%s: %s" % (errorStr, reason)
if 'GUID already registered' in reason:
opFile.Status = 'Failed'
self.log.error(errorStr, "%s: %s" % (lfn, reason))
elif 'File already registered with no replicas' in reason:
self.log.warn(errorStr, "%s: %s, will remove it and retry" % (lfn, reason))
dm.removeFile(lfn)
else:
self.log.warn(errorStr, "%s: %s" % (lfn, reason))
failedFiles += 1
else:
if self.rmsMonitoring:
self.rmsMonitoringReporter.addRecord(
self.createRMSRecord("Successful", 1)
)
else:
gMonitor.addMark("RegisterOK", 1)
# self.dataLoggingClient().addFileRecord(
# lfn, "Register", ','.join(catalogs) if catalogs else "all catalogs", "", "RegisterFile")
self.log.verbose("file %s has been registered at %s" %
(lfn, ','.join(catalogs) if catalogs else "all catalogs"))
opFile.Status = "Done"
if self.rmsMonitoring:
self.rmsMonitoringReporter.commit()
# # final check
if failedFiles:
self.log.warn("all files processed, %s files failed to register" % failedFiles)
self.operation.Error = "some files failed to register"
return S_ERROR(self.operation.Error)
return S_OK()
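# --- Editor's note (hypothetical helper, not part of DIRAC): the shape of the
# registration step performed in the loop above, isolated for illustration.
# The tuple layout mirrors the fileTuple built in RegisterFile.__call__().
def _registerSingleFile(dm, lfn, pfn, size, targetSE, guid, checksum):
  """Register one file through an existing DataManager instance."""
  fileTuple = (lfn, pfn, size, targetSE, guid, checksum)
  res = dm.registerFile(fileTuple)
  if not res["OK"] or lfn in res["Value"]["Failed"]:
    return S_ERROR("failed to register LFN %s" % lfn)
  return S_OK(lfn)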
|
yujikato/DIRAC
|
src/DIRAC/DataManagementSystem/Agent/RequestOperations/RegisterFile.py
|
Python
|
gpl-3.0
| 5,299
|
[
"DIRAC"
] |
8bc9bcce591d5a05ecd7bd50348dac5a9372f92ec720a92d10a625abcd61ec9c
|
"""
Manage transfers from arbitrary URLs to temporary files. Socket interface for
IPC with multiple process configurations.
"""
import os, subprocess, socket, logging, threading
from galaxy import eggs
from galaxy.util import listify, json
log = logging.getLogger( __name__ )
class TransferManager( object ):
"""
Manage simple data transfers from URLs to temporary locations.
"""
def __init__( self, app ):
self.app = app
self.sa_session = app.model.context.current
self.command = 'python %s' % os.path.abspath( os.path.join( os.getcwd(), 'scripts', 'transfer.py' ) )
if app.config.get_bool( 'enable_job_recovery', True ):
# Only one Galaxy server process should be able to recover jobs! (otherwise you'll have nasty race conditions)
self.running = True
self.sleeper = Sleeper()
self.restarter = threading.Thread( target=self.__restarter )
self.restarter.start()
def new( self, path=None, **kwd ):
if 'protocol' not in kwd:
raise Exception( 'Missing required parameter "protocol".' )
protocol = kwd[ 'protocol' ]
if protocol in [ 'http', 'https' ]:
if 'url' not in kwd:
raise Exception( 'Missing required parameter "url".' )
elif protocol == 'scp':
# TODO: add more checks here?
if 'sample_dataset_id' not in kwd:
raise Exception( 'Missing required parameter "sample_dataset_id".' )
if 'file_path' not in kwd:
raise Exception( 'Missing required parameter "file_path".' )
transfer_job = self.app.model.TransferJob( state=self.app.model.TransferJob.states.NEW, params=kwd )
self.sa_session.add( transfer_job )
self.sa_session.flush()
return transfer_job
def run( self, transfer_jobs ):
"""
This method blocks, so if invoking the transfer manager ever starts
taking too long, we should move it to a thread. However, the
transfer_manager will either daemonize or return after submitting to a
running daemon, so it should be fairly quick to return.
"""
transfer_jobs = listify( transfer_jobs )
printable_tj_ids = ', '.join( [ str( tj.id ) for tj in transfer_jobs ] )
log.debug( 'Initiating transfer job(s): %s' % printable_tj_ids )
# Set all jobs running before spawning, or else updating the state may
# clobber a state change performed by the worker.
[ tj.__setattr__( 'state', tj.states.RUNNING ) for tj in transfer_jobs ]
self.sa_session.add_all( transfer_jobs )
self.sa_session.flush()
for tj in transfer_jobs:
params_dict = tj.params
protocol = params_dict[ 'protocol' ]
# The transfer script should daemonize fairly quickly - if this is
# not the case, this process will need to be moved to a
# non-blocking method.
cmd = '%s %s' % ( self.command, tj.id )
log.debug( 'Transfer command is: %s' % cmd )
p = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
p.wait()
output = p.stdout.read( 32768 )
if p.returncode != 0:
log.error( 'Spawning transfer job failed: %s: %s' % ( tj.id, output ) )
tj.state = tj.states.ERROR
tj.info = 'Spawning transfer job failed: %s' % output.splitlines()[-1]
self.sa_session.add( tj )
self.sa_session.flush()
def get_state( self, transfer_jobs, via_socket=False ):
transfer_jobs = listify( transfer_jobs )
rval = []
for tj in transfer_jobs:
if via_socket and tj.state not in tj.terminal_states and tj.socket:
try:
request = json.jsonrpc_request( method='get_state', id=True )
sock = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
sock.settimeout( 5 )
sock.connect( ( 'localhost', tj.socket ) )
sock.send( json.dumps( request ) )
response = sock.recv( 8192 )
valid, response = json.validate_jsonrpc_response( response, id=request['id'] )
if not valid:
# No valid response received, make some pseudo-json-rpc
raise Exception( dict( code=128, message='Did not receive valid response from transfer daemon for state' ) )
if 'error' in response:
# Response was valid but Request resulted in an error
raise Exception( response['error'])
else:
# Request was valid
response['result']['transfer_job_id'] = tj.id
rval.append( response['result'] )
except Exception, e:
# State checking via the transfer daemon failed, just
# return the state from the database instead. Callers can
# look for the 'error' member of the response to see why
# the check failed.
self.sa_session.refresh( tj )
error = e.args
if type( error ) != dict:
error = dict( code=256, message='Error connecting to transfer daemon', data=str( e ) )
rval.append( dict( transfer_job_id=tj.id, state=tj.state, error=error ) )
else:
self.sa_session.refresh( tj )
rval.append( dict( transfer_job_id=tj.id, state=tj.state ) )
for tj_state in rval:
if tj_state['state'] in self.app.model.TransferJob.terminal_states:
log.debug( 'Transfer job %s is in terminal state: %s' % ( tj_state['transfer_job_id'], tj_state['state'] ) )
elif tj_state['state'] == self.app.model.TransferJob.states.PROGRESS and 'percent' in tj_state:
log.debug( 'Transfer job %s is %s%% complete' % ( tj_state[ 'transfer_job_id' ], tj_state[ 'percent' ] ) )
if len( rval ) == 1:
return rval[0]
return rval
def __restarter( self ):
log.info( 'Transfer job restarter starting up...' )
while self.running:
dead = []
self.sa_session.expunge_all() # our session is threadlocal so this is safe.
for tj in self.sa_session.query( self.app.model.TransferJob ) \
.filter( self.app.model.TransferJob.state == self.app.model.TransferJob.states.RUNNING ):
if not tj.pid:
continue
# This will only succeed if the process exists and is owned by the
# user running Galaxy (unless that user is root, in which case it
# can be owned by anyone - but you're not running Galaxy as root,
# right?). This is not guaranteed proof that the transfer is alive
# since another process may have assumed the original process' PID.
# But that will only cause the transfer to not restart until that
# process dies, which hopefully won't be too long from now... If
# it becomes a problem, try to talk to the socket a few times and
# restart the transfer if socket communication fails repeatedly.
try:
os.kill( tj.pid, 0 )
except:
self.sa_session.refresh( tj )
if tj.state == tj.states.RUNNING:
log.error( 'Transfer job %s is marked as running but pid %s appears to be dead.' % ( tj.id, tj.pid ) )
dead.append( tj )
if dead:
self.run( dead )
self.sleeper.sleep( 30 )
log.info( 'Transfer job restarter shutting down...' )
def shutdown( self ):
self.running = False
self.sleeper.wake()
class Sleeper( object ):
"""
Provides a 'sleep' method that sleeps for a number of seconds *unless*
    the wake method is called (from a different thread).
"""
def __init__( self ):
self.condition = threading.Condition()
def sleep( self, seconds ):
self.condition.acquire()
self.condition.wait( seconds )
self.condition.release()
def wake( self ):
self.condition.acquire()
self.condition.notify()
self.condition.release()
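# --- Editor's note (hypothetical sketch, not part of Galaxy): the liveness
# test used by TransferManager.__restarter() above, shown on its own. Signal 0
# sends nothing; it only checks that the PID exists and may be signalled, so a
# reused PID can still yield a false positive, as the comments above explain.
def _pid_looks_alive( pid ):
    try:
        os.kill( pid, 0 )
    except OSError:
        return False
    return True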
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/jobs/transfer_manager.py
|
Python
|
gpl-3.0
| 8,605
|
[
"Galaxy"
] |
63902fca5f0c33dc8eafed12033be2b730b183f8682026ab2f95ddacf7aee4cd
|
from pyramid import testing
from pyramid.session import UnencryptedCookieSessionFactoryConfig
import pytest
from pyramid_crud import forms, views
from webob.multidict import MultiDict
@pytest.fixture
def pyramid_request():
r = testing.DummyRequest()
r.POST = MultiDict()
return r
@pytest.yield_fixture
def config():
cfg = testing.setUp(autocommit=False)
cfg.include('pyramid_mako')
cfg.include('pyramid_crud')
sess = UnencryptedCookieSessionFactoryConfig('itsaseekreet')
cfg.set_session_factory(sess)
cfg.commit()
yield cfg
testing.tearDown()
class TestMinimal(object):
@pytest.fixture
def minimal_model(self, model_factory):
Model = model_factory()
return Model
@pytest.fixture
def minimal_form(self, minimal_model):
class MyForm(forms.CSRFModelForm):
class Meta:
model = minimal_model
return MyForm
@pytest.fixture
def minimal_view(self, config, minimal_form, DBSession, pyramid_request):
class MyView(views.CRUDView):
Form = minimal_form
url_path = '/items'
pyramid_request.dbsession = DBSession
return MyView
@pytest.fixture(autouse=True)
def _prepare(self, minimal_model, minimal_form, minimal_view):
self.model = minimal_model
self.form = minimal_form
self.view = minimal_view
def test_minimal_list(self, minimal_view, pyramid_request):
result = minimal_view(pyramid_request).list()
assert len(result) == 2
assert 'action_form' in result
assert result['items'].all() == []
def test_minimal_list_items(self, minimal_view, pyramid_request, DBSession,
minimal_model):
DBSession.add_all([minimal_model(), minimal_model()])
DBSession.flush()
result = minimal_view(pyramid_request).list()
assert len(result) == 2
assert 'action_form' in result
items = result['items'].all()
assert len(items) == 2
for item in items:
assert item.id
def test_minimal_new(self, minimal_view, pyramid_request):
result = minimal_view(pyramid_request).edit()
assert len(result) == 2
assert result['is_new']
form = result['form']
assert len(list(form)) == 1
assert form.csrf_token
def test_minimal_edit(self, minimal_view, pyramid_request, minimal_model,
DBSession):
DBSession.add_all([minimal_model(), minimal_model()])
DBSession.flush()
pyramid_request.matchdict["id"] = 1
result = minimal_view(pyramid_request).edit()
assert len(result) == 2
assert not result['is_new']
form = result['form']
assert len(list(form)) == 1
assert form.csrf_token
class TestFunctional(object):
@pytest.fixture(autouse=True)
def _prepare(self):
from .test_app import main
from webtest import TestApp
self.app = TestApp(main())
def test_list_empty(self):
response = self.app.get('/polls')
assert response.status_int == 200
assert "<title>Polls | CRUD</title>" in response
tables = response.html.find_all("table")
assert len(tables) == 1
[table] = tables
titles = ['Question', 'Date Published', 'Published?']
for td, title in zip(table.find("thead").find("tr").find_all("td"),
titles):
assert td.string == title
rows = response.html.find("tbody").find_all("tr")
assert len(rows) == 0
def test_new_edit_delete(self):
# Get a list of items
response = self.app.get('/polls')
assert response.status_int == 200
# Fetch the form for a new item
response = response.click(href="http://localhost/polls/new")
assert response.status_int == 200
assert len(response.forms) == 1
# Make sure the extra fields are there
assert 'choice_0_choice_text' in response.form.fields
assert 'choice_1_choice_text' in response.form.fields
assert 'choice_2_choice_text' not in response.form.fields
# Remove the extra fields
response = response.form.submit('delete_choice_0')
assert response.status_int == 200
response = response.form.submit('delete_choice_0')
assert response.status_int == 200
# Create an invalid form
response = response.form.submit('save_close')
assert response.status_int == 200
assert "This field is required" in response
# Fill out the form properly
form = response.form
form['question'] = "Wazzup"
form['pub_date'] = '2014-04-09 10:48:17'
response = response.form.submit('save_close')
assert response.status_int == 302
response = response.follow()
assert "Poll added!" in response
# Make sure the item was actually added
table = response.html.find("table")
rows = table.find("tbody").find_all("tr")
assert len(rows) == 1
_, question, pub_date, published = rows[0].find_all("td")
assert question.find("a").string.strip() == "Wazzup"
href = question.find("a").attrs["href"]
assert href == "http://localhost/polls/1/edit"
assert pub_date.string.strip() == '2014-04-09 10:48:17'
assert published.string.strip() == 'No'
# Visit the existing item to edit it and add a choice
response = response.click(href=href)
response = response.form.submit('add_choice')
form = response.form
form['choice_0_choice_text'] = 'Choice1'
form['choice_0_votes'] = '5'
response = response.form.submit('add_choice')
form = response.form
form['choice_1_choice_text'] = 'Choice2'
form['choice_1_votes'] = '6'
response = form.submit('save')
assert response.status_int == 302
response = response.follow()
assert "Poll edited!" in response
# Now delete that saved choice again and make sure it is persisted
form = response.form
assert form['choice_0_id'].value == '1'
assert form['choice_0_choice_text'].value == 'Choice1'
assert form['choice_1_id'].value == '2'
assert form['choice_1_choice_text'].value == 'Choice2'
assert 'choice_2_id' not in form.fields
assert 'choice_2_choice_text' not in form.fields
response = form.submit('delete_choice_0')
assert response.status_int == 200
form = response.form
assert "delete_choice_1" not in form.fields
response = form.submit('delete_choice_0')
assert response.status_int == 200
# Make sure it is gone
form = response.form
assert 'choice_0_id' not in form.fields
assert 'choice_0_choice_text' not in form.fields
assert 'choice_1_choice_text' not in form.fields
response = form.submit('save_close')
assert response.status_int == 302
response = response.follow()
assert "Poll edited!" in response
# Create a second poll
response = response.click(href="http://localhost/polls/new")
response = response.form.submit('delete_choice_0')
response = response.form.submit('delete_choice_0')
form = response.form
form['question'] = "Wazzup"
form['pub_date'] = '2014-04-09 10:48:17'
response = response.form.submit('save_close')
response = response.follow()
assert "Poll added!" in response
# Delete the Poll
form = response.form
form['action'] = 'delete'
form['items'] = ['1']
response = form.submit('submit')
assert response.status_int == 200
# Press Cancel
response = response.click(href="http://localhost/polls")
assert response.status_int == 200
# Delete (for real)
form = response.form
form['action'] = 'delete'
form['items'] = ['1']
response = form.submit('submit')
assert response.status_int == 200
# Confirm deletion
response = response.form.submit('confirm_delete')
assert response.status_int == 302
response = response.follow()
assert "1 Poll deleted!" in response
|
Javex/pyramid_crud
|
tests/test_integration.py
|
Python
|
mit
| 8,372
|
[
"VisIt"
] |
69690b0444f08b2ba0edd74dc38423264540464855ef280aeedf47f2ab443f81
|
#!/usr/bin/env python
import vtk
class TerminationCrashTestCase:
"""This test case should produce a crash on termination if we aren't
careful in DECREFing a Python callback via vtkPythonCommand.
The problem occurs only when you have an uncollectable reference
cycle with an observer callback. This should cause a crash upon
termination (trying to delete python object after interpreter
shutdown), or an assertion error if the observed event is
'DeleteEvent' or 'AnyEvent' (trying to invoke python method after
interpreter shutdown). The test case basically consists of a python
class which contains a vtk object which is being observed by that
same class. There is no error if the event handler is a member of a
different class, even if the instance of that class is contained by
the same python object which contains the vtk object. There is also
no error if a vtk object is subclassed in python and that subclass
has an event handler for itself (self.AddObserver(Event,
self.Handler)). Finally, the problem disappears if the container
class has a cyclic reference to itself (self.CyclicReference =
self). """
def __init__(self) :
self.Object = vtk.vtkObject()
self.Object.AddObserver('StartEvent', self.Handler)
def Handler(self, obj, evt) :
print 'event received'
test = TerminationCrashTestCase()
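# --- Editor's note (hypothetical variant, illustration only): per the class
# docstring above, the crash disappears when the container holds a cyclic
# reference to itself, e.g.:
class TerminationWorkaroundCase(TerminationCrashTestCase):
    def __init__(self):
        TerminationCrashTestCase.__init__(self)
        self.CyclicReference = self   # self-cycle; per the docstring, avoids the crash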
|
timkrentz/SunTracker
|
IMU/VTK-6.2.0/Common/Core/Testing/Python/TestTerminationCrash.py
|
Python
|
mit
| 1,445
|
[
"VTK"
] |
cec69741e62d943bd48fbc4c94d2069cc214e9b00d6891e6bee21624d124bb78
|
"""
This module is used to create an appropriate object which can be used to insert records to the Monitoring system.
It always try to insert the records directly. In case of failure the monitoring client is used...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
def getDBOrClient(DB, serverName):
"""Tries to instantiate the DB object and returns it if we manage to connect to the DB,
otherwise returns a Client of the server
"""
from DIRAC import gLogger
from DIRAC.Core.Base.Client import Client
try:
database = DB()
if database._connected:
return database
except Exception:
pass
gLogger.info("Can not connect to DB will use %s" % serverName)
return Client(url=serverName)
def getMonitoringDB():
serverName = "Monitoring/Monitoring"
MonitoringDB = None
try:
from DIRAC.MonitoringSystem.DB.MonitoringDB import MonitoringDB
except Exception:
pass
return getDBOrClient(MonitoringDB, serverName)
monitoringDB = getMonitoringDB()
|
ic-hep/DIRAC
|
src/DIRAC/MonitoringSystem/Client/ServerUtils.py
|
Python
|
gpl-3.0
| 1,130
|
[
"DIRAC"
] |
95cbf92ed58963a252bd131a035fff052e563fd7cba38241b56c5a7bd72decfc
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The ants module provides basic functions for interfacing with WMQL."""
import os
import glob
from nipype.interfaces.base import (CommandLine, CommandLineInputSpec, TraitedSpec, traits)
class TractQuerierInputSpec(CommandLineInputSpec):
atlas_type = traits.Enum('Desikan', 'Mori', argstr='-q %s', usedefault=True,
desc='Atlas type for the queries')
input_atlas = traits.File(desc="Input Atlas volume", exists=False, mandatory=True, argstr="-a %s", copy_file=False)
input_tractography = traits.File(desc="Input Tractography", exists=False, mandatory=True, argstr="-t %s", copy_file=False)
    out_prefix = traits.Str('query', desc="prefix for the results", mandatory=False, argstr="-o %s", usedefault=True)
queries = traits.List(desc="Input queries", exists=True, mandatory=False, argstr="--query_selection %s")
class TractQuerierOutputSpec(TraitedSpec):
output_queries = traits.List(exists=True, desc='resulting query files')
class TractQuerier(CommandLine):
"""Uses WMQL to generate white matter tracts
Examples
--------
>>> from ..nipype.wmql import TractQuerier
>>> import os
>>> tract_querier = TractQuerier()
>>> tract_querier.inputs.atlas_type = 'Desikan'
>>> tract_querier.inputs.input_atlas = 'wmparc.nii.gz'
>>> tract_querier.inputs.input_tractography = 'tracts.vtk'
>>> tract_querier.inputs.out_prefix = 'query_'
>>> tract_querier.inputs.queries = ['ilf.left' ,'ilf.right']
>>> tract_querier.cmdline
'tract_querier -q freesurfer_queries.qry -a wmparc.nii.gz -t tracts.vtk -o query_ --query_selection ilf.left,ilf.right'
"""
_cmd = 'tract_querier'
input_spec = TractQuerierInputSpec
output_spec = TractQuerierOutputSpec
def _format_arg(self, name, spec, value):
if name == 'atlas_type':
return spec.argstr % {"Mori": 'mori_queries.qry', "Desikan": 'freesurfer_queries.qry'}[value]
elif name == 'queries':
return spec.argstr % (''.join((q + ',' for q in value[:-1])) + value[-1])
return super(TractQuerier, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_queries'] = glob.glob(os.path.join(os.getcwd(),
self.inputs.out_prefix +
'*.vtk')
)
return outputs
class MapImageToTractsInputSpec(CommandLineInputSpec):
input_tractography = traits.File(desc="Input Tractography", exists=False, mandatory=True, argstr="%s", copy_file=False, position=0)
input_image = traits.File(desc="Input Image", exists=False, mandatory=True, argstr="-i %s", copy_file=False)
output_tractography_prefix = traits.File('out_', desc="output tract name", mandatory=False, argstr="-o %s", usedefault=True)
data_name = traits.String(desc="Name of the property", mandatory=True, argstr="-n %s")
output_point_value_prefix = traits.File(
'out_',
desc="Values of the image at every point of the tract", exists=False, mandatory=False, argstr="--output_point_value_file %s",
usedefault=True
)
class MapImageToTractsOutputSpec(TraitedSpec):
output_tractography = traits.File(exists=True, desc='output tractography')
output_point_value = traits.File(exists=True, desc='output values on tractography')
class MapImageToTracts(CommandLine):
"""Uses WMQL to generate white matter tracts
Examples
--------
>>> from ..nipype.wmql import MapImageToTracts
>>> tract_mapper = MapImageToTracts()
>>> tract_mapper.inputs.output_tractography_prefix = 'out_'
>>> tract_mapper.inputs.input_image = 'fa.nii.gz'
>>> tract_mapper.inputs.data_name = 'FA'
>>> tract_mapper.inputs.input_tractography = 'tracts.vtk'
>>> tract_mapper.cmdline
'tq_map_image_to_tracts tracts.vtk -n FA -i fa.nii.gz --output_point_value_file out_tracts.txt -o out_tracts.vtk'
"""
_cmd = 'tq_map_image_to_tracts'
input_spec = MapImageToTractsInputSpec
output_spec = MapImageToTractsOutputSpec
def _format_arg(self, name, spec, value):
if name == 'output_tractography_prefix':
return spec.argstr % ((
value + os.path.basename(self.inputs.input_tractography)
))
if name == 'output_point_value_prefix':
basename = os.path.basename(self.inputs.input_tractography)
name, ext = os.path.splitext(basename)
return spec.argstr % ((
value +
name +
'.txt'
))
return super(MapImageToTracts, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_tractography'] = os.path.join(
os.getcwd(),
(
self.inputs.output_tractography_prefix +
os.path.basename(self.inputs.input_tractography)
)
)
basename = os.path.basename(self.inputs.input_tractography)
name, ext = os.path.splitext(basename)
outputs['output_point_value'] = os.path.join(
os.getcwd(),
(
self.inputs.output_point_value_prefix +
name +
'.txt'
))
return outputs
|
BRAINSia/tract_querier
|
tract_querier/nipype/wmql.py
|
Python
|
bsd-3-clause
| 5,548
|
[
"VTK"
] |
a09b5e727ce4c4eeec629fa7ca13ab27a83478cfb571c2108cff3c2b0078ba7c
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageEllipsoidSource(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkImageEllipsoidSource(), 'Processing.',
(), ('vtkImageData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
chrisidefix/devide
|
modules/vtk_basic/vtkImageEllipsoidSource.py
|
Python
|
bsd-3-clause
| 488
|
[
"VTK"
] |
dc5642086cbfc93e32eb7c101214369bac571d00a7f19635785b3f302660c72d
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Radicale Server - Calendar Server
# Copyright © 2009-2013 Guillaume Ayoub
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Radicale CalDAV and CardDAV server
==================================
The Radicale Project is a CalDAV (calendar) and CardDAV (contact) server. It
aims to be a light solution, easy to use, easy to install, easy to configure.
As a consequence, it requires few software dependencies and is pre-configured to
work out-of-the-box.
The Radicale Project runs on most of the UNIX-like platforms (Linux, BSD,
MacOS X) and Windows. It is known to work with Evolution, Lightning, iPhone
and Android clients. It is free and open-source software, released under GPL
version 3.
For further information, please visit the `Radicale Website
<http://www.radicale.org/>`_.
"""
from distutils.core import setup
import radicale
# When the version is updated, ``radicale.VERSION`` must be modified.
# A new section in the ``NEWS`` file must be added too.
setup(
name="Radicale",
version=radicale.VERSION,
description="CalDAV and CardDAV Server",
long_description=__doc__,
author="Guillaume Ayoub",
author_email="guillaume.ayoub@kozea.fr",
url="http://www.radicale.org/",
download_url=("http://pypi.python.org/packages/source/R/Radicale/"
"Radicale-%s.tar.gz" % radicale.VERSION),
license="GNU GPL v3",
platforms="Any",
packages=[
"radicale", "radicale.auth", "radicale.storage", "radicale.rights"],
provides=["radicale"],
scripts=["bin/radicale"],
keywords=["calendar", "addressbook", "CalDAV", "CardDAV"],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Environment :: Web Environment",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Office/Business :: Groupware"])
|
UIKit0/Radicale
|
setup.py
|
Python
|
gpl-3.0
| 3,051
|
[
"VisIt"
] |
291e40c162011ee69767fcfdae654656d470ac75c497edb1410a513f8dc2841a
|
import os
import unittest
from pymatgen.core.periodic_table import Element
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.phonopy import *
import sys
if sys.version_info >= (3, 0):
try:
from phonopy import Phonopy
from phonopy.structure.atoms import PhonopyAtoms
from phonopy.file_IO import write_disp_yaml
except ImportError:
Phonopy = None
else:
Phonopy = None
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files', "phonopy")
class PhonopyParserTest(PymatgenTest):
def test_get_ph_bs(self):
ph_bs = get_ph_bs_symm_line(os.path.join(test_dir, 'NaCl_band.yaml'),
has_nac=True)
self.assertAlmostEqual(ph_bs.bands[1][10], 0.7753555184)
self.assertAlmostEqual(ph_bs.bands[5][100], 5.2548379776)
self.assertArrayEqual(ph_bs.bands.shape, (6, 204))
self.assertArrayEqual(ph_bs.eigendisplacements.shape, (6, 204, 2, 3))
self.assertArrayAlmostEqual(ph_bs.eigendisplacements[3][50][0],
[0. + 0.j, 0.14166569 + 0.04098339j,
-0.14166569 - 0.04098339j])
self.assertTrue(ph_bs.has_eigendisplacements, True)
self.assertArrayEqual(ph_bs.min_freq()[0].frac_coords, [0, 0, 0])
self.assertAlmostEqual(ph_bs.min_freq()[1], -0.03700895020)
self.assertTrue(ph_bs.has_imaginary_freq())
self.assertFalse(ph_bs.has_imaginary_freq(tol=0.5))
self.assertArrayAlmostEqual(ph_bs.asr_breaking(),
[-0.0370089502, -0.0370089502,
-0.0221388897])
self.assertEqual(ph_bs.nb_bands, 6)
self.assertEqual(ph_bs.nb_qpoints, 204)
self.assertArrayAlmostEqual(ph_bs.qpoints[1].frac_coords, [0.01, 0, 0])
self.assertTrue(ph_bs.has_nac)
self.assertAlmostEqual(
ph_bs.get_nac_frequencies_along_dir([1, 1, 0])[3], 4.6084532143)
self.assertIsNone(ph_bs.get_nac_frequencies_along_dir([1, 1, 1]))
self.assertArrayAlmostEqual(
ph_bs.get_nac_eigendisplacements_along_dir([1, 1, 0])[3][1],
[(0.1063906409128248 + 0j), 0j, 0j])
self.assertIsNone(ph_bs.get_nac_eigendisplacements_along_dir([1, 1, 1]))
def test_get_ph_dos(self):
dos = get_ph_dos(os.path.join(test_dir, 'NaCl_total_dos.dat'))
self.assertAlmostEqual(dos.densities[15], 0.0001665998)
self.assertAlmostEqual(dos.frequencies[20], 0.0894965119)
self.assertAlmostEqual(dos.get_interpolated_value(3.),
1.2915532670115628)
self.assertEqual(len(dos.frequencies), 201)
self.assertEqual(len(dos.densities), 201)
def test_get_complete_dos(self):
cdos = get_complete_ph_dos(
os.path.join(test_dir, 'NaCl_partial_dos.dat'),
os.path.join(test_dir, 'NaCl_phonopy.yaml'))
site_Na = cdos.structure[0]
site_Cl = cdos.structure[1]
self.assertEqual(len(cdos.frequencies), 201)
self.assertAlmostEqual(cdos.pdos[site_Na][30], 0.008058208)
self.assertAlmostEqual(cdos.pdos[site_Cl][30], 0.0119040783)
self.assertIn(Element.Na, cdos.get_element_dos())
self.assertIn(Element.Cl, cdos.get_element_dos())
@unittest.skipIf(Phonopy is None, "Phonopy not present")
class StructureConversionTest(PymatgenTest):
def test_structure_conversion(self):
s_pmg = PymatgenTest.get_structure("LiFePO4")
s_ph = get_phonopy_structure(s_pmg)
s_pmg2 = get_pmg_structure(s_ph)
coords_ph = s_ph.get_scaled_positions()
symbols_pmg = set([e.symbol for e in s_pmg.composition.keys()])
symbols_pmg2 = set([e.symbol for e in s_pmg2.composition.keys()])
self.assertAlmostEqual(s_ph.get_cell()[1, 1],
s_pmg.lattice._matrix[1, 1], 7)
self.assertAlmostEqual(s_pmg.lattice._matrix[1, 1],
s_pmg2.lattice._matrix[1, 1], 7)
self.assertEqual(symbols_pmg, set(s_ph.symbols))
self.assertEqual(symbols_pmg, symbols_pmg2)
self.assertArrayAlmostEqual(coords_ph[3], s_pmg.frac_coords[3])
self.assertArrayAlmostEqual(s_pmg.frac_coords[3], s_pmg2.frac_coords[3])
self.assertEqual(s_ph.get_number_of_atoms(), s_pmg.num_sites)
self.assertEqual(s_pmg.num_sites, s_pmg2.num_sites)
@unittest.skipIf(Phonopy is None, "Phonopy not present")
class GetDisplacedStructuresTest(PymatgenTest):
def test_get_displaced_structures(self):
pmg_s = Structure.from_file(os.path.join(test_dir, "POSCAR-unitcell"),
False)
supercell_matrix = [[2, 0, 0], [0, 1, 0], [0, 0, 2]]
structures = get_displaced_structures(pmg_structure=pmg_s,
atom_disp=0.01,
supercell_matrix=supercell_matrix)
self.assertEqual(len(structures), 49)
self.assertArrayAlmostEqual(structures[4].frac_coords[0],
np.array(
[0.10872682, 0.21783039, 0.12595286]),
7)
self.assertArrayAlmostEqual(structures[-1].frac_coords[9],
np.array(
[0.89127318, 0.78130015, 0.37404715]),
7)
self.assertEqual(structures[0].num_sites, 128)
self.assertEqual(structures[10].num_sites, 128)
self.assertArrayAlmostEqual(structures[0].lattice._matrix,
structures[8].lattice._matrix, 8)
if __name__ == '__main__':
unittest.main()
|
montoyjh/pymatgen
|
pymatgen/io/tests/test_phonopy.py
|
Python
|
mit
| 5,833
|
[
"phonopy",
"pymatgen"
] |
c3f098455bbd460cefcd84fb05699ca753d34f99fa04b09de14efca576f1a0b2
|
#!/usr/bin/env python3
import sys
import numpy as np
import argparse
import matplotlib.pyplot as plt
from analysisTools import sensibleIds, groundOffset, quadrantAnalysis
from netcdfTools import read3dDataFromNetCDF, netcdfOutputDataset, \
createNetcdfVariable, netcdfWriteAndClose
from utilities import filesFromList, inputIfNone
from txtTools import openIOFile
'''
Description: Reynolds stress calculator.
In the case of PALM-generated results (featuring a staggered grid), the velocity data must first be
interpolated onto cell centers (i.e. the scalar grid) with the groupVectorDataNetCdf.py script.
Author: Mikko Auvinen
mikko.auvinen@helsinki.fi
University of Helsinki &
Finnish Meteorological Institute
'''
#==========================================================#
sepStr = ' # = # = # = # = # = # = # = # = '
parser = argparse.ArgumentParser()
parser.add_argument("strKey", type=str,nargs='?', default=None,\
help="Search string for collecting input NETCDF files.")
parser.add_argument("-v", "--varnames", type=str, nargs=2, default=['u','w'],\
help="Name of the variables in NETCDF file. Default=[u, w]")
parser.add_argument("-i1", "--ijk1",type=int, nargs=3,\
help="Starting indices (ix, iy, iz) of the considered data. Required.")
parser.add_argument("-i2", "--ijk2",type=int, nargs=3,\
help="Final indices (ix, iy, iz) of the considered data. Required.")
parser.add_argument("-vs", "--vstar",type=float, nargs=2, default=[1.,1.],\
help="Characteristic value v* (vs) used in (v+ =(v-v0)/v*). Default=[1,1].")
parser.add_argument("-v0", "--vref",type=float, nargs=2, default=[0.,0.],\
help="Reference value v0 (vref) used in (v+ =(v-v0)/v*). Default=[0,0].")
parser.add_argument("-xs", "--xscale",type=float, default=1.,\
help="Coordinate scaling value (xs) used in (x+ =x/xs). Default=1.")
parser.add_argument("-of", "--outputToFile", type=str, default=None, \
help="Name of the file to output analysis results. Default=None")
parser.add_argument("-p", "--printOn", action="store_true", default=False,\
help="Print the numpy array data.")
args = parser.parse_args()
#==========================================================#
# Rename ...
strKey = args.strKey
varnames = args.varnames
v0 = np.array( args.vref ) # Convert to numpy array
vs = np.array( args.vstar )
xs = args.xscale
ijk1 = args.ijk1
ijk2 = args.ijk2
printOn = args.printOn
#==========================================================#
'''
Establish two boolean variables which indicate whether the created variable is an
independent or dependent variable in function createNetcdfVariable().
'''
parameter = True; variable = False
strKey = inputIfNone( strKey , " Enter search string: " )
fileNos, fileList = filesFromList( strKey+"*")
for fn in fileNos:
# - - - - - - - - - - - - - - - - - - - - - - - - - - #
# First fluctuation component
cl = 1
ncDict = read3dDataFromNetCDF( fileList[fn] , varnames[0], cl )
v1 = ncDict['v'] # 'v' is a generic name for a variable in ncDict
# Second fluctuation component
ncDict = read3dDataFromNetCDF( fileList[fn] , varnames[1], cl )
v2 = ncDict['v']
# Dims
nt, nz, ny, nx = np.shape( v1 )
# - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Spatial coords and time
x = ncDict['x']; y = ncDict['y']; z = ncDict['z']
time = ncDict['time']
ncDict = None
# Plot coord. information. This aids the user in the beginning.
infoStr = '''
Coord. range:
min(x)={0} ... max(x)={1}, nx = {2}
min(y)={3} ... max(y)={4}, ny = {5}
min(z)={6} ... max(z)={7}, nz = {8}
'''.format(\
np.min(x), np.max(x), len(x),\
np.min(y), np.max(y), len(y),\
np.min(z), np.max(z), len(z) )
#print(infoStr)
# - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Non-dimensionalize the time series
v1 -= v0[0]; v2 -= v0[1]
v1 /= vs[0]; v2 /= vs[1]
# - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Extract the fluctuations
v1m = np.mean(v1, axis=(0))
v2m = np.mean(v2, axis=(0))
# Extract fluctuating part and normalize by variance
# Reuse the v1 and v2 variables to store values
v1 -= v1m; v2 -= v2m
# - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Now check whether the given indices make sense
ijk1 = sensibleIds( np.array( ijk1 ), x, y, z )
ijk2 = sensibleIds( np.array( ijk2 ), x, y, z )
print(' Check (1): i, j, k = {}'.format(ijk1))
print(' Check (2): i, j, k = {}'.format(ijk2))
nvz = (ijk2[2]-ijk1[2])+1; idz = range(ijk1[2],ijk2[2]+1)
nvy = (ijk2[1]-ijk1[1])+1; idy = range(ijk1[1],ijk2[1]+1)
nvx = (ijk2[0]-ijk1[0])+1; idx = range(ijk1[0],ijk2[0]+1)
Cv = np.zeros( ( nt, nvz, nvy, nvx ) )
d = np.zeros( ( nvz, nvy, nvx ) )
zd = np.zeros( ( nvz ) )
# - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Compute covariance
for i in range(nvx):
for j in range(nvy):
for k in range(nvz):
if( np.abs(v1[1,idz[k],idy[j],idx[i]])>1e-7 and np.abs(v2[1,idz[k],idy[j],idx[i]])>1e-7 ):
Cv[:,k,j,i] = v1[ :,idz[k], idy[j], idx[i] ] * v2[ :,idz[k], idy[j], idx[i] ]
else:
Cv[:,k,j,i] = np.nan
d[k,j,i] = np.sqrt( (z[idz[k]]-z[0])**2 + (y[idy[j]]-y[0])**2 + (x[idx[i]]-x[0])**2 )
zd[k] = np.abs(z[idz[k]]-z[0])
# - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Reynolds stress
Rs = np.nanmean( Cv, axis=(0) )
Rs_havg = np.nanmean( Rs , axis=(1,2) ) # average over x and y
hStr = " Reynolds averaged {}'{}' between ijk {} and {} ".format(varnames[0],varnames[1], ijk1,ijk2)
fileout = '{}{}_UEX'.format(varnames[0],varnames[1]) + fileList[fn].split('/')[-1]
fileout = fileout.strip('.nc') + '.dat'
np.savetxt(fileout, np.c_[ (1./xs)*d.ravel(), Rs.ravel() ], fmt='%3.6e', header=hStr)
# - - - - - <RS> - - - - - #
hStr = " Horizontally and Reynolds avg {}'{}' between z=[{},{}]".format(varnames[0],varnames[1], zd[0],zd[-1])
fileout = 'DA_{}{}_'.format(varnames[0],varnames[1]) + fileList[fn].split('/')[-1]
fileout = fileout.strip('.nc') + '.dat'
np.savetxt(fileout, np.c_[ (1./xs)*zd.ravel(), Rs_havg.ravel() ], fmt='%3.6e', header=hStr)
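# --- Editor's note (hypothetical sketch, not part of the script): the single-
# point Reynolds stress estimate that the loop above evaluates over the whole
# index box, i.e. the time average of the fluctuation product u'w'.
def _reynolds_stress_point(u, w):
  ''' u, w: 1-D numpy arrays of velocity samples at one grid point. '''
  up = u - np.mean(u)
  wp = w - np.mean(w)
  return np.nanmean( up * wp )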
|
mjsauvinen/P4UL
|
pyNetCDF/reynoldsStressNetCdf.py
|
Python
|
mit
| 6,222
|
[
"NetCDF"
] |
1dd5fe079791e1e1823858699efa30beee58e47e3348cc62c21bea6bb8db643b
|
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
import copy
import io
import os
import pickle
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from unittest import mock
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import close_old_connections, connection, connections
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse,
)
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
ignore_warnings, override_settings,
)
from django.test.signals import setting_changed
from django.utils import timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.deprecation import RemovedInDjango21Warning
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable:
def __getstate__(self):
raise pickle.PickleError()
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(SimpleTestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Nonexistent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr('answer')
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr('answer')
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr_version('answer')
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr_version('answer')
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist')
def test_get_or_set(self):
self.assertEqual(cache.get_or_set('mykey', 'default'), 'default')
self.assertEqual(cache.get_or_set('mykey', None), None)
def test_get_or_set_callable(self):
def my_callable():
return 'default'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'default')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, exclude=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `exclude` is a set of cache names denoting which `_caches_setting_base` keys
# should be omitted.
    # `params` are test-specific overrides and `_caches_setting_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
exclude = exclude or set()
setting = {k: base.copy() for k in _caches_setting_base if k not in exclude}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
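# --- Editor's note (hypothetical example, not part of Django's test suite):
# the search order documented above, params -> _caches_setting_base -> base.
def _caches_setting_example():
    setting = caches_setting_for_tests(
        base={'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
        exclude={'cull', 'zero_cull'},
        LOCATION='unique-snowflake',
    )
    # e.g. setting['v2'] == {'BACKEND': '...LocMemCache', 'VERSION': 2,
    #                        'LOCATION': 'unique-snowflake'}
    return setting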
class BaseCacheTests:
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
"""Nonexistent cache keys return as None/default."""
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
with self.subTest(key=key):
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
"""
        Follow memcached's convention where a timeout greater than 30 days is
treated as an absolute expiration timestamp instead of a relative
offset (#12399).
"""
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
"""
Passing in None into timeout results in a value that is cached forever
"""
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertIs(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
"""
Passing in zero into timeout results in a value that is not cached
"""
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count += 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def _perform_invalid_key_test(self, key, expected_warning):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
cache.set(key, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
self.assertEqual(str(w[0].message.args[0]), expected_warning)
finally:
cache.key_func = old_func
def test_invalid_key_characters(self):
# memcached doesn't allow whitespace or control characters in keys.
key = 'key with spaces and 清'
expected_warning = (
"Cache key contains characters that will cause errors if used "
"with memcached: %r" % key
)
self._perform_invalid_key_test(key, expected_warning)
def test_invalid_key_length(self):
# memcached limits key length to 250.
key = ('a' * 250) + '清'
expected_warning = (
'Cache key will cause errors if used with memcached: '
'%r (longer than %s)' % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
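# A minimal illustrative sketch (not part of the original Django test suite): it
# spells out the two memcached key constraints the tests above exercise. Keys
# longer than 250 characters, or containing whitespace/control characters, are
# refused by memcached itself, which is why the other backends emit
# CacheKeyWarning for such keys. The helper only roughly mirrors those checks;
# its name is an assumption introduced for illustration.
def _demo_memcached_safe_key(key):
    return len(key) <= 250 and all(32 < ord(ch) != 127 for ch in key)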
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
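# A minimal illustrative sketch (not part of the original Django test suite): the
# assertions above combine two ways of picking a key version, a per-call
# ``version`` argument and a per-cache ``VERSION`` setting. The 'v2' alias is
# assumed to be configured with VERSION=2, as in this test module's settings.
def _demo_versioned_keys():
    from django.core.cache import cache, caches
    cache.set('answer', 1)          # stored under version 1, the default
    caches['v2'].set('answer', 2)   # stored under version 2
    return cache.get('answer'), cache.get('answer', version=2)  # (1, 2)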
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add('unpicklable', Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set('unpicklable', Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get('projector'))
self.assertEqual(cache.get_or_set('projector', 42), 42)
self.assertEqual(cache.get('projector'), 42)
self.assertEqual(cache.get_or_set('null', None), None)
def test_get_or_set_callable(self):
def my_callable():
return 'value'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value')
def test_get_or_set_version(self):
msg = "get_or_set() missing 1 required positional argument: 'default'"
cache.get_or_set('brian', 1979, version=2)
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian')
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian', version=1)
self.assertIsNone(cache.get('brian', version=1))
self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
self.assertIsNone(cache.get('brian', version=3))
def test_get_or_set_racing(self):
with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add:
# Simulate cache.add() failing to add a value. In that case, the
# default value should be returned.
cache_add.return_value = False
self.assertEqual(cache.get_or_set('key', 'default'), 'default')
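# A minimal illustrative sketch (not part of the original Django test suite):
# get_or_set() is built on add(), so when a concurrent writer stores the key
# first the existing value wins, which is what the mocked test above simulates.
# Passing a callable keeps an expensive default from being computed unless the
# key is actually missing. The key and helper names are assumptions.
def _demo_get_or_set_callable():
    from django.core.cache import cache
    def expensive_default():
        return 'computed-only-when-missing'
    return cache.get_or_set('report', expensive_default, timeout=60)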
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
# The super call needs to happen first for the settings override.
super().setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super().tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
out = io.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Use another table name to avoid the 'table already exists' message.
LOCATION='createcachetable_dry_run_mode'
))
def test_createcachetable_dry_run_mode(self):
out = io.StringIO()
management.call_command('createcachetable', dry_run=True, stdout=out)
output = out.getvalue()
self.assertTrue(output.startswith("CREATE TABLE"))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = io.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n")
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter:
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def allow_migrate(self, db, app_label, **hints):
if app_label == 'django_cache':
return db == 'other'
return None
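# A minimal illustrative sketch (not part of the original Django test suite): a
# router such as DBCacheRouter only takes effect once it is listed in
# DATABASE_ROUTERS. The test below wires it up with override_settings; in a
# settings module the equivalent would be a plain assignment (the dotted path
# below is hypothetical).
EXAMPLE_DATABASE_ROUTERS = ['path.to.routers.DBCacheRouter']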
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable', database='default', verbosity=0)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable', database='other', verbosity=0)
class PicklingSideEffect:
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super().setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
configured_caches = {}
for _cache_params in settings.CACHES.values():
configured_caches[_cache_params['BACKEND']] = _cache_params
MemcachedCache_params = configured_caches.get('django.core.cache.backends.memcached.MemcachedCache')
PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache')
# The memcached backends don't support cull-related options like `MAX_ENTRIES`.
memcached_excluded_caches = {'cull', 'zero_cull'}
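# A minimal illustrative sketch (not part of the original Django test suite): the
# lookups above only find memcached parameters if the test settings contain an
# entry such as the following (the backend choice and server address are
# assumptions; any reachable memcached server works).
EXAMPLE_MEMCACHED_CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    },
}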
class BaseMemcachedTests(BaseCacheTests):
# By default it's assumed that the client doesn't clean up connections
# properly, in which case the backend must do so after each request.
should_disconnect_on_close = True
def test_location_multiple_servers(self):
locations = [
['server1.tld', 'server2:11211'],
'server1.tld;server2:11211',
'server1.tld,server2:11211',
]
for location in locations:
with self.subTest(location=location):
params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location}
with self.settings(CACHES={'default': params}):
self.assertEqual(cache._servers, ['server1.tld', 'server2:11211'])
def test_invalid_key_characters(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons), we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
# when using the ascii protocol.
with self.assertRaises(Exception):
cache.set('key with spaces', 'value')
def test_invalid_key_length(self):
# memcached limits key length to 250
with self.assertRaises(Exception):
cache.set('a' * 251, 'value')
def test_default_never_expiring_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
TIMEOUT=None)):
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
def test_default_far_future_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
# 60*60*24*365, 1 year
TIMEOUT=31536000)):
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
# pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
# tell from a quick check of its source code. This is falling back to
# the default value exposed by python-memcached on my system.
max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
try:
cache.set('small_value', large_value)
except Exception:
# Some clients (e.g. pylibmc) raise when the value is too large,
# while others (e.g. python-memcached) intentionally return True
# indicating success. This test is primarily checking that the key
# was deleted, so the return/exception behavior for the set()
# itself is not important.
pass
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
def test_close(self):
# For clients that don't manage their connections properly, the
# connection is closed when the request is complete.
signals.request_finished.disconnect(close_old_connections)
try:
with mock.patch.object(cache._lib.Client, 'disconnect_all', autospec=True) as mock_disconnect:
signals.request_finished.send(self.__class__)
self.assertIs(mock_disconnect.called, self.should_disconnect_on_close)
finally:
signals.request_finished.connect(close_old_connections)
@unittest.skipUnless(MemcachedCache_params, "MemcachedCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
))
class MemcachedCacheTests(BaseMemcachedTests, TestCase):
base_params = MemcachedCache_params
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key in settings.CACHES:
with self.subTest(cache_key=cache_key):
self.assertEqual(caches[cache_key]._cache.pickleProtocol, pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
OPTIONS={'server_max_value_length': 9999},
))
def test_memcached_options(self):
self.assertEqual(cache._cache.server_max_value_length, 9999)
@unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
))
class PyLibMCCacheTests(BaseMemcachedTests, TestCase):
base_params = PyLibMCCache_params
# libmemcached manages its own connections.
should_disconnect_on_close = False
# By default, pylibmc/libmemcached don't verify keys client-side and so
# this test triggers a server-side bug that causes later tests to fail
# (#19914). The `verify_keys` behavior option could be set to True (which
# would avoid triggering the server-side bug), however this test would
# still fail due to https://github.com/lericson/pylibmc/issues/219.
@unittest.skip("triggers a memcached-server bug, causing subsequent tests to fail")
def test_invalid_key_characters(self):
pass
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
OPTIONS={
'binary': True,
'behaviors': {'tcp_nodelay': True},
},
))
def test_pylibmc_options(self):
self.assertTrue(cache._cache.binary)
self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
OPTIONS={'tcp_nodelay': True},
))
def test_pylibmc_legacy_options(self):
deprecation_message = (
"Specifying pylibmc cache behaviors as a top-level property "
"within `OPTIONS` is deprecated. Move `tcp_nodelay` into a dict named "
"`behaviors` inside `OPTIONS` instead."
)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))
self.assertEqual(len(warns), 1)
self.assertIsInstance(warns[0].message, RemovedInDjango21Warning)
self.assertEqual(str(warns[0].message), deprecation_message)
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super().setUp()
self.dirname = tempfile.mkdtemp()
# Caches location cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super().tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
self.assertTrue(os.path.exists(self.dirname))
def test_get_ignores_enoent(self):
cache.set('foo', 'bar')
os.unlink(cache._key_to_file('foo'))
# Returns the default instead of erroring.
self.assertEqual(cache.get('foo', 'baz'), 'baz')
def test_get_does_not_ignore_non_filenotfound_exceptions(self):
with mock.patch('builtins.open', side_effect=IOError):
with self.assertRaises(IOError):
cache.get('foo')
def test_empty_cache_file_considered_expired(self):
cache_file = cache._key_to_file('foo')
with open(cache_file, 'wb') as fh:
fh.write(b'')
with open(cache_file, 'rb') as fh:
self.assertIs(cache._is_expired(fh), True)
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(SimpleTestCase):
"""
Tests for the ability to mix in a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class CacheClosingTests(SimpleTestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
"""
Cache settings that specify TIMEOUT=None create caches that will
set non-expiring keys.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
del(self.DEFAULT_TIMEOUT)
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined in
django.core.cache.backends.base.BaseCache.__init__().
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
default settings will have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non-expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
ALLOWED_HOSTS=['.example.com'],
)
class CacheUtils(SimpleTestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
('', {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
with self.subTest(initial_cc=initial_cc, newheaders=newheaders):
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
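# A minimal illustrative sketch (not part of the original Django test suite): it
# shows the public API the table above exercises. patch_cache_control() merges
# directives into an existing Cache-Control header, and 'private'/'public'
# replace each other rather than accumulating.
def _demo_patch_cache_control():
    from django.http import HttpResponse
    from django.utils.cache import patch_cache_control
    response = HttpResponse()
    response['Cache-Control'] = 'must-revalidate, max-age=60, public'
    patch_cache_control(response, private=True)  # 'public' dropped, 'private' added
    return response['Cache-Control']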
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(SimpleTestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = timezone.get_current_timezone_name()
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = timezone.get_current_timezone_name()
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Timezone-dependent cache keys should use ASCII characters only
# (#17476). The implementation here is a bit odd (timezone.utc is an
# instance, not a class), but it simulates the correct conditions.
class CustomTzName(timezone.utc):
pass
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName):
CustomTzName.zone = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(
sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active"
)
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(
sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active"
)
@ignore_warnings(category=RemovedInDjango21Warning) # USE_ETAGS=True
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with non empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# The cache can be recovered
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# ETags are used.
self.assertTrue(get_cache_data.has_header('ETag'))
# ETags can be disabled.
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change again the language
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change again the language
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(SimpleTestCase):
def setUp(self):
super().setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super().tearDown()
def test_constructor(self):
"""
Ensure the constructor correctly distinguishes between using CacheMiddleware
as middleware and using it as a view decorator, and that it sets attributes
appropriately in each case.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
# Value of DEFAULT_CACHE_ALIAS from django.core.cache
self.assertEqual(as_view_decorator.cache_alias, 'default')
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
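# A minimal illustrative sketch (not part of the original Django test suite): the
# two usages distinguished above correspond to listing the cache middleware in
# settings versus wrapping a single view with cache_page(). The view below is
# hypothetical and only illustrates the decorator form.
def _demo_cache_page_decorator():
    from django.http import HttpResponse
    from django.views.decorators.cache import cache_page
    @cache_page(60, cache='other', key_prefix='demo')
    def demo_view(request):
        return HttpResponse('cached for 60 seconds')
    return demo_view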
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
def test_304_response_has_http_caching_headers_but_not_cached(self):
original_view = mock.Mock(return_value=HttpResponseNotModified())
view = cache_page(2)(original_view)
request = self.factory.get('/view/')
# The view shouldn't be cached on the second call.
view(request).close()
response = view(request)
response.close()
self.assertEqual(original_view.call_count, 2)
self.assertIsInstance(response, HttpResponseNotModified)
self.assertIn('Cache-Control', response)
self.assertIn('Expires', response)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
"""
Tests various headers with TemplateResponse.
Most are probably redundant since they manipulate the same object
anyway, but the ETag header is 'special' because it relies on the
content being complete (which is not necessarily always the case
with a TemplateResponse).
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@ignore_warnings(category=RemovedInDjango21Warning)
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(SimpleTestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key, 'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key, 'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key, 'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
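# Editor's note (illustrative assumption inferred from the expected keys above,
# not part of the original test suite): the trailing hex digest of a fragment
# key looks like the MD5 of the colon-joined vary_on values, e.g.
#
#     import hashlib
#     hashlib.md5(b'').hexdigest()     # 'd41d8cd98f00b204e9800998ecf8427e'
#     hashlib.md5(b'abc').hexdigest()  # '900150983cd24fb0d6963f7d28e17f72'
#
# which matches the keys asserted in test_without_vary_on and test_with_one_vary_on.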
class CacheHandlerTest(SimpleTestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
|
sjlehtin/django
|
tests/cache/tests.py
|
Python
|
bsd-3-clause
| 91,787
|
[
"Brian"
] |
5de2db7bb459f4df0e71966776fdaf78b1b9dbda639bb83a3b9a3db31330f6b3
|
# $Id$
#
# Copyright (C) 2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
try:
from reportlab import platypus
except ImportError:
import sys
sys.stderr.write('ReportLab module could not be imported. Db->PDF functionality not available')
GetReportlabTable = None
QuickReport = None
else:
from rdkit import Chem
try:
from pyRDkit.utils import chemdraw
except ImportError:
hasCDX=0
else:
hasCDX=1
from rdkit.utils import cactvs
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import DrawUtils
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.Dbase import DbInfo
from rdkit.Reports.PDFReport import PDFReport,ReportUtils
import os,tempfile,sys
def GetReportlabTable(self,*args,**kwargs):
""" this becomes a method of DbConnect """
dbRes = self.GetData(*args,**kwargs)
rawD = [dbRes.GetColumnNames()]
colTypes = dbRes.GetColumnTypes()
binCols = []
for i in range(len(colTypes)):
if colTypes[i] in DbInfo.sqlBinTypes or colTypes[i]=='binary':
binCols.append(i)
nRows = 0
for entry in dbRes:
nRows += 1
for col in binCols:
entry = list(entry)
entry[col] = 'N/A'
rawD.append(entry)
#if nRows >10: break
res = platypus.Table(rawD)
return res
from reportlab.lib.units import inch
class CDXImageTransformer(object):
def __init__(self,smiCol,width=1,verbose=1,tempHandler=None):
self.smiCol = smiCol
if tempHandler is None:
tempHandler = ReportUtils.TempFileHandler()
self.tempHandler = tempHandler
self.width = width*inch
self.verbose=verbose
def __call__(self,arg):
res = list(arg)
if self.verbose:
print 'Render:',res[0]
if hasCDX:
smi = res[self.smiCol]
tmpName = self.tempHandler.get('.jpg')
try:
img = chemdraw.SmilesToPilImage(smi)
w,h = img.size
aspect = float(h)/w
img.save(tmpName)
img = platypus.Image(tmpName)
img.drawWidth = self.width
img.drawHeight = aspect*self.width
res[self.smiCol] = img
except:
import traceback
traceback.print_exc()
res[self.smiCol] = 'Failed'
return res
class CactvsImageTransformer(object):
def __init__(self,smiCol,width=1.,verbose=1,tempHandler=None):
self.smiCol = smiCol
if tempHandler is None:
tempHandler = ReportUtils.TempFileHandler()
self.tempHandler = tempHandler
self.width = width*inch
self.verbose=verbose
def __call__(self,arg):
res = list(arg)
if self.verbose:
sys.stderr.write('Render(%d): %s\n'%(self.smiCol,str(res[0])))
smi = res[self.smiCol]
tmpName = self.tempHandler.get('.gif')
aspect = 1
width = 300
height = aspect*width
ok = cactvs.SmilesToGif(smi,tmpName,(width,height))
if ok:
try:
img = platypus.Image(tmpName)
img.drawWidth = self.width
img.drawHeight = aspect*self.width
except:
ok = 0
if ok:
res[self.smiCol] = img
else:
# FIX: maybe include smiles here in a Paragraph?
res[self.smiCol] = 'Failed'
return res
from rdkit.sping.ReportLab.pidReportLab import RLCanvas as Canvas
from rdkit.Chem.Draw.MolDrawing import MolDrawing
class ReportLabImageTransformer(object):
def __init__(self,smiCol,width=1.,verbose=1,tempHandler=None):
self.smiCol = smiCol
self.width = width*inch
self.verbose=verbose
def __call__(self,arg):
res = list(arg)
if self.verbose:
sys.stderr.write('Render(%d): %s\n'%(self.smiCol,str(res[0])))
smi = res[self.smiCol]
aspect = 1
width = self.width
height = aspect*width
try:
mol = Chem.MolFromSmiles(smi)
Chem.Kekulize(mol)
canv = Canvas((width,height))
drawing = MolDrawing()
drawing.atomLabelMinFontSize=3
drawing.minLabelPadding=(.5,.5)
drawing.bondLineWidth=0.5
if not mol.GetNumConformers():
rdDepictor.Compute2DCoords(mol)
drawing.AddMol(mol,canvas=canv)
ok = True
except:
if self.verbose:
import traceback
traceback.print_exc()
ok = False
if ok:
res[self.smiCol] = canv.drawing
else:
# FIX: maybe include smiles here in a Paragraph?
res[self.smiCol] = 'Failed'
return res
class RDImageTransformer(object):
def __init__(self,smiCol,width=1.,verbose=1,tempHandler=None):
self.smiCol = smiCol
if tempHandler is None:
tempHandler = ReportUtils.TempFileHandler()
self.tempHandler = tempHandler
self.width = width*inch
self.verbose=verbose
def __call__(self,arg):
res = list(arg)
if self.verbose:
sys.stderr.write('Render(%d): %s\n'%(self.smiCol,str(res[0])))
smi = res[self.smiCol]
tmpName = self.tempHandler.get('.jpg')
aspect = 1
width = 300
height = aspect*width
ok = DrawUtils.SmilesToJpeg(smi,tmpName,size=(width,height))
if ok:
try:
img = platypus.Image(tmpName)
img.drawWidth = self.width
img.drawHeight = aspect*self.width
except:
ok = 0
if ok:
res[self.smiCol] = img
else:
# FIX: maybe include smiles here in a Paragraph?
res[self.smiCol] = 'Failed'
return res
def QuickReport(conn,fileName,*args,**kwargs):
from reportlab.lib import colors
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import inch
styles = getSampleStyleSheet()
title = 'Db Report'
if kwargs.has_key('title'):
title = kwargs['title']
del kwargs['title']
names = [x.upper() for x in conn.GetColumnNames()]
try:
smiCol = names.index('SMILES')
except ValueError:
try:
smiCol = names.index('SMI')
except ValueError:
smiCol = -1
if smiCol >-1:
if hasCDX:
tform = CDXImageTransformer(smiCol)
elif 1:
tform = ReportLabImageTransformer(smiCol)
else:
tform = CactvsImageTransformer(smiCol)
else:
tform = None
kwargs['transform'] = tform
tbl = conn.GetReportlabTable(*args,**kwargs)
tbl.setStyle(platypus.TableStyle([('GRID',(0,0),(-1,-1),1,colors.black),
('FONT',(0,0),(-1,-1),'Times-Roman',8),
]))
if smiCol >-1 and tform:
tbl._argW[smiCol] = tform.width*1.2
elements = [tbl]
reportTemplate = PDFReport()
reportTemplate.pageHeader = title
doc = platypus.SimpleDocTemplate(fileName)
doc.build(elements,onFirstPage=reportTemplate.onPage,
onLaterPages=reportTemplate.onPage)
DbConnect.GetReportlabTable = GetReportlabTable
if __name__=='__main__':
import sys
dbName = sys.argv[1]
tblName = sys.argv[2]
fName = 'report.pdf'
conn = DbConnect(dbName,tblName)
QuickReport(conn,fName,where="where mol_id in ('1','100','104','107')")
|
rdkit/rdkit-orig
|
rdkit/Dbase/DbReport.py
|
Python
|
bsd-3-clause
| 7,416
|
[
"RDKit"
] |
9253b2752d61ab35e803af1ff97d2b1524e49b07197bfcdc76fab29b836ae4ca
|
# -*- python -*-
# Copyright (C) 2009-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/home/wengbj/project/openwrt-3.10/firefly/mtksdk-openwrt-3.10.14/staging_dir/toolchain-mipsel_24kec+dsp_gcc-4.8-linaro_uClibc-0.9.33.2/share/gcc-4.8.3/python'
libdir = '/home/wengbj/project/openwrt-3.10/firefly/mtksdk-openwrt-3.10.14/staging_dir/toolchain-mipsel_24kec+dsp_gcc-4.8-linaro_uClibc-0.9.33.2/mipsel-openwrt-linux-uclibc/lib'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
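# Editor's note (illustrative worked example of the path logic above, not part
# of the original script): with the hard-coded pythondir/libdir values,
# os.path.commonprefix() returns the shared ".../toolchain-..._uClibc-0.9.33.2/"
# prefix, so after stripping it
#     pythondir == 'share/gcc-4.8.3/python'
#     libdir    == 'mipsel-openwrt-linux-uclibc/lib'   (two components)
#     dotdots   == '../../'
# and the pretty-printer package is resolved relative to the objfile as
#     os.path.join(os.path.dirname(objfile), '../../', 'share/gcc-4.8.3/python')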
|
h4ck3rm1k3/OpenWrt-Firefly-SDK
|
staging_dir/toolchain-mipsel_24kec+dsp_gcc-4.8-linaro_uClibc-0.9.33.2/lib/libstdc++.so.6.0.19-gdb.py
|
Python
|
gpl-2.0
| 2,580
|
[
"Firefly"
] |
52cf6f0f80f1d48db22e1a2400ce5b496f357689d08e9a1446c9d23ebf900b26
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition
# -----------------------------------------------------------------
# Create the configuration definition
definition = ConfigurationDefinition()
# Required
definition.add_positional_optional("image", "file_path", "name/path of the input image")
# Input and output
definition.add_optional("input", "directory_path", "input directory path", letter="i")
definition.add_optional("output", "directory_path", "output directory path", letter="o")
# Regions
definition.add_optional("special_region", "file_path", "region indicating areas that require special attention")
definition.add_optional("ignore_region", "file_path", "region indicating areas that should be ignored")
definition.add_optional("bad", "file_path", "region specifying areas that have to be added to the mask of bad pixels")
definition.add_flag("animation", "make an animation of the extraction procedure")
# Detailed settings
definition.add_optional("interpolation_method", "string", "interpolation method", "pts")
definition.add_flag("sigma_clip", "perform sigma-clipping when interpolating", True)
definition.add_optional("source_outer_factor", "real", "outer factor", 1.4)
definition.add_flag("dilate_saturation", "dilate saturation")
definition.add_optional("saturation_dilation_factor", "real", "saturation dilation factor", 2.0)
definition.add_flag("dilate_other", "dilate other sources")
definition.add_optional("other_dilation_factor", "real", "dilation factor for other sources", 2.0)
definition.add_flag("only_foreground", "only interpolate over the stars that are in the foreground of the galaxy", False)
definition.add_flag("write", "do writing", True)
# Flags
definition.add_flag("remove_companions", "remove companion galaxies", False)
# -----------------------------------------------------------------
|
SKIRT/PTS
|
magic/config/extract.py
|
Python
|
agpl-3.0
| 2,224
|
[
"Galaxy"
] |
750de49014f46483d8421f49e5c80e6ae66ee1450bd194572ae8a1ad04ab3a69
|
"""
Distributions
"""
from __future__ import division, absolute_import
from functools import wraps
from abc import ABCMeta
from abc import abstractmethod
import scipy as sp
import numpy as np
from pygam.core import Core
from pygam.utils import ylogydu
def multiply_weights(deviance):
@wraps(deviance)
def multiplied(self, y, mu, weights=None, **kwargs):
if weights is None:
weights = np.ones_like(mu)
return deviance(self, y, mu, **kwargs) * weights
return multiplied
def divide_weights(V):
@wraps(V)
def divided(self, mu, weights=None, **kwargs):
if weights is None:
weights = np.ones_like(mu)
return V(self, mu, **kwargs) / weights
return divided
class Distribution(Core):
__metaclass__ = ABCMeta
"""
base distribution class
"""
def __init__(self, name=None, scale=None):
"""
creates an instance of the Distribution class
Parameters
----------
name : str, default: None
scale : float or None, default: None
scale/standard deviation of the distribution
Returns
-------
self
"""
self.scale = scale
self._known_scale = self.scale is not None
super(Distribution, self).__init__(name=name)
if not self._known_scale:
self._exclude += ['scale']
def phi(self, y, mu, edof, weights):
"""
GLM scale parameter.
for Binomial and Poisson families this is unity
for Normal family this is variance
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
edof : float
estimated degrees of freedom
weights : array-like shape (n,) or None, default: None
sample weights
if None, defaults to array of ones
Returns
-------
scale : estimated model scale
"""
if self._known_scale:
return self.scale
else:
return (np.sum(weights * self.V(mu)**-1 * (y - mu)**2) /
(len(mu) - edof))
@abstractmethod
def sample(self, mu):
"""
Return random samples from this distribution.
Parameters
----------
mu : array-like of shape n_samples or shape (n_simulations, n_samples)
expected values
Returns
-------
random_samples : np.array of same shape as mu
"""
pass
class NormalDist(Distribution):
"""
Normal Distribution
"""
def __init__(self, scale=None):
"""
creates an instance of the NormalDist class
Parameters
----------
scale : float or None, default: None
scale/standard deviation of the distribution
Returns
-------
self
"""
super(NormalDist, self).__init__(name='normal', scale=scale)
def log_pdf(self, y, mu, weights=None):
"""
computes the log of the pdf or pmf of the values under the current distribution
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
weights : array-like shape (n,) or None, default: None
sample weights
if None, defaults to array of ones
Returns
-------
pdf/pmf : np.array of length n
"""
if weights is None:
weights = np.ones_like(mu)
scale = self.scale / weights
return sp.stats.norm.logpdf(y, loc=mu, scale=scale)
@divide_weights
def V(self, mu):
"""
glm Variance function.
if
Y ~ ExpFam(theta, scale=phi)
such that
E[Y] = mu = b'(theta)
and
Var[Y] = b''(theta) * phi / w
then we seek V(mu) such that we can represent Var[y] as a fn of mu:
Var[Y] = V(mu) * phi
ie
V(mu) = b''(theta) / w
Parameters
----------
mu : array-like of length n
expected values
Returns
-------
V(mu) : np.array of length n
"""
return np.ones_like(mu)
@multiply_weights
def deviance(self, y, mu, scaled=True):
"""
model deviance
for a gaussian linear model, this is equal to the SSE
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
scaled : boolean, default: True
            whether to divide the deviance by the distribution scale
Returns
-------
deviances : np.array of length n
"""
dev = (y - mu)**2
if scaled:
dev /= self.scale
return dev
def sample(self, mu):
"""
Return random samples from this Normal distribution.
Samples are drawn independently from univariate normal distributions
with means given by the values in `mu` and with standard deviations
equal to the `scale` attribute if it exists otherwise 1.0.
Parameters
----------
mu : array-like of shape n_samples or shape (n_simulations, n_samples)
expected values
Returns
-------
random_samples : np.array of same shape as mu
"""
standard_deviation = self.scale**0.5 if self.scale else 1.0
return np.random.normal(loc=mu, scale=standard_deviation, size=None)
class BinomialDist(Distribution):
"""
Binomial Distribution
"""
def __init__(self, levels=1):
"""
creates an instance of the Binomial class
Parameters
----------
        levels : int or None, default: 1
number of trials in the binomial distribution
Returns
-------
self
"""
if levels is None:
levels = 1
self.levels = levels
super(BinomialDist, self).__init__(name='binomial', scale=1.)
self._exclude.append('scale')
def log_pdf(self, y, mu, weights=None):
"""
computes the log of the pdf or pmf of the values under the current distribution
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
weights : array-like shape (n,) or None, default: None
sample weights
if None, defaults to array of ones
Returns
-------
pdf/pmf : np.array of length n
"""
if weights is None:
weights = np.ones_like(mu)
n = self.levels
p = mu / self.levels
return sp.stats.binom.logpmf(y, n, p)
@divide_weights
def V(self, mu):
"""
glm Variance function
computes the variance of the distribution
Parameters
----------
mu : array-like of length n
expected values
Returns
-------
variance : np.array of length n
"""
return mu * (1 - mu / self.levels)
@multiply_weights
def deviance(self, y, mu, scaled=True):
"""
model deviance
        for a bernoulli logistic model, this is equal to twice the
        negative log-likelihood.
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
scaled : boolean, default: True
            whether to divide the deviance by the distribution scale
Returns
-------
deviances : np.array of length n
"""
dev = 2 * (ylogydu(y, mu) + ylogydu(self.levels - y, self.levels - mu))
if scaled:
dev /= self.scale
return dev
def sample(self, mu):
"""
        Return random samples from this Binomial distribution.
Parameters
----------
mu : array-like of shape n_samples or shape (n_simulations, n_samples)
expected values
Returns
-------
random_samples : np.array of same shape as mu
"""
number_of_trials = self.levels
success_probability = mu / number_of_trials
return np.random.binomial(n=number_of_trials, p=success_probability,
size=None)
class PoissonDist(Distribution):
"""
Poisson Distribution
"""
def __init__(self):
"""
creates an instance of the PoissonDist class
Parameters
----------
None
Returns
-------
self
"""
super(PoissonDist, self).__init__(name='poisson', scale=1.)
self._exclude.append('scale')
def log_pdf(self, y, mu, weights=None):
"""
computes the log of the pdf or pmf of the values under the current distribution
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
weights : array-like shape (n,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
pdf/pmf : np.array of length n
"""
if weights is None:
weights = np.ones_like(mu)
# in Poisson regression weights are proportional to the exposure
# so we want to pump up all our predictions
# NOTE: we assume the targets are counts, not rate.
# ie if observations were scaled to account for exposure, they have
# been rescaled before calling this function.
# since some samples have higher exposure,
# they also need to have higher variance,
# we do this by multiplying mu by the weight=exposure
mu = mu * weights
return sp.stats.poisson.logpmf(y, mu=mu)
@divide_weights
def V(self, mu):
"""
glm Variance function
computes the variance of the distribution
Parameters
----------
mu : array-like of length n
expected values
Returns
-------
variance : np.array of length n
"""
return mu
@multiply_weights
def deviance(self, y, mu, scaled=True):
"""
model deviance
        for a Poisson model, this is equal to twice the negative
        log-likelihood, up to a term that does not depend on mu.
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
scaled : boolean, default: True
            whether to divide the deviance by the distribution scale
Returns
-------
deviances : np.array of length n
"""
dev = 2 * (ylogydu(y, mu) - (y - mu))
if scaled:
dev /= self.scale
return dev
def sample(self, mu):
"""
Return random samples from this Poisson distribution.
Parameters
----------
mu : array-like of shape n_samples or shape (n_simulations, n_samples)
expected values
Returns
-------
random_samples : np.array of same shape as mu
"""
return np.random.poisson(lam=mu, size=None)
class GammaDist(Distribution):
"""
Gamma Distribution
"""
def __init__(self, scale=None):
"""
creates an instance of the GammaDist class
Parameters
----------
scale : float or None, default: None
scale/standard deviation of the distribution
Returns
-------
self
"""
super(GammaDist, self).__init__(name='gamma', scale=scale)
def log_pdf(self, y, mu, weights=None):
"""
computes the log of the pdf or pmf of the values under the current distribution
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
weights : array-like shape (n,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
pdf/pmf : np.array of length n
"""
if weights is None:
weights = np.ones_like(mu)
nu = weights / self.scale
return sp.stats.gamma.logpdf(x=y, a=nu, scale=mu / nu)
@divide_weights
def V(self, mu):
"""
glm Variance function
computes the variance of the distribution
Parameters
----------
mu : array-like of length n
expected values
Returns
-------
variance : np.array of length n
"""
return mu**2
@multiply_weights
def deviance(self, y, mu, scaled=True):
"""
model deviance
        for a Gamma model, this is equal to twice the negative
        log-likelihood, up to a term that does not depend on mu.
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
scaled : boolean, default: True
            whether to divide the deviance by the distribution scale
Returns
-------
deviances : np.array of length n
"""
dev = 2 * ((y - mu) / mu - np.log(y / mu))
if scaled:
dev /= self.scale
return dev
def sample(self, mu):
"""
Return random samples from this Gamma distribution.
Parameters
----------
mu : array-like of shape n_samples or shape (n_simulations, n_samples)
expected values
Returns
-------
random_samples : np.array of same shape as mu
"""
# in numpy.random.gamma, `shape` is the parameter sometimes denoted by
# `k` that corresponds to `nu` in S. Wood (2006) Table 2.1
shape = 1. / self.scale
# in numpy.random.gamma, `scale` is the parameter sometimes denoted by
# `theta` that corresponds to mu / nu in S. Wood (2006) Table 2.1
scale = mu / shape
return np.random.gamma(shape=shape, scale=scale, size=None)
class InvGaussDist(Distribution):
"""
Inverse Gaussian (Wald) Distribution
"""
def __init__(self, scale=None):
"""
creates an instance of the InvGaussDist class
Parameters
----------
scale : float or None, default: None
scale/standard deviation of the distribution
Returns
-------
self
"""
super(InvGaussDist, self).__init__(name='inv_gauss', scale=scale)
def log_pdf(self, y, mu, weights=None):
"""
computes the log of the pdf or pmf of the values under the current distribution
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
weights : array-like shape (n,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
pdf/pmf : np.array of length n
"""
if weights is None:
weights = np.ones_like(mu)
gamma = weights / self.scale
return sp.stats.invgauss.logpdf(y, mu, scale=1./gamma)
@divide_weights
def V(self, mu):
"""
glm Variance function
computes the variance of the distribution
Parameters
----------
mu : array-like of length n
expected values
Returns
-------
variance : np.array of length n
"""
return mu**3
@multiply_weights
def deviance(self, y, mu, scaled=True):
"""
model deviance
        for an inverse Gaussian model, this is equal to twice the negative
        log-likelihood, up to a term that does not depend on mu.
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
scaled : boolean, default: True
            whether to divide the deviance by the distribution scale
Returns
-------
deviances : np.array of length n
"""
dev = ((y - mu)**2) / (mu**2 * y)
if scaled:
dev /= self.scale
return dev
def sample(self, mu):
"""
Return random samples from this Inverse Gaussian (Wald) distribution.
Parameters
----------
mu : array-like of shape n_samples or shape (n_simulations, n_samples)
expected values
Returns
-------
random_samples : np.array of same shape as mu
"""
return np.random.wald(mean=mu, scale=self.scale, size=None)
DISTRIBUTIONS = {'normal': NormalDist,
'poisson': PoissonDist,
'binomial': BinomialDist,
'gamma': GammaDist,
'inv_gauss': InvGaussDist
}
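# --- Illustrative usage sketch (editor's addition; this helper is not part of
# --- the pyGAM API and the toy values are arbitrary) ---
# The docstrings above describe log_pdf, the GLM variance function V(mu) and
# the (optionally scaled) deviance; a minimal way to exercise them:
def _example_distribution_usage():
    y = np.array([1.0, 2.0, 3.0])
    mu = np.array([1.5, 1.5, 2.5])
    dist = DISTRIBUTIONS['normal'](scale=1.0)
    dev = dist.deviance(y, mu)   # normal deviance: (y - mu)**2 / scale, per sample
    logp = dist.log_pdf(y, mu)   # element-wise Gaussian log-density
    var = dist.V(mu)             # variance function; all ones for the normal family
    return dev, logp, var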
|
dswah/pyGAM
|
pygam/distributions.py
|
Python
|
apache-2.0
| 17,299
|
[
"Gaussian"
] |
0efe767e90f7592b7fc479c70acdf11f960f79d0d71d78ec06855515fecbd348
|
# -*- coding: utf-8 -*-
#
# network.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""PyNEST Microcircuit: Network Class
----------------------------------------
Main file of the microcircuit defining the ``Network`` class with functions to
build and simulate the network.
"""
import os
import numpy as np
import nest
import helpers
class Network:
""" Provides functions to setup NEST, to create and connect all nodes of
the network, to simulate, and to evaluate the resulting spike data.
Instantiating a Network object derives dependent parameters and already
initializes the NEST kernel.
Parameters
---------
sim_dict
Dictionary containing all parameters specific to the simulation
(see: ``sim_params.py``).
net_dict
Dictionary containing all parameters specific to the neuron and
network models (see: ``network_params.py``).
stim_dict
        Optional dictionary containing all parameters specific to the stimulus
        (see: ``stimulus_params.py``).
"""
def __init__(self, sim_dict, net_dict, stim_dict=None):
self.sim_dict = sim_dict
self.net_dict = net_dict
self.stim_dict = stim_dict
# data directory
self.data_path = sim_dict['data_path']
if nest.Rank() == 0:
if os.path.isdir(self.data_path):
message = ' Directory already existed.'
if self.sim_dict['overwrite_files']:
message += ' Old data will be overwritten.'
else:
os.mkdir(self.data_path)
message = ' Directory has been created.'
print('Data will be written to: {}\n{}\n'.format(self.data_path,
message))
# derive parameters based on input dictionaries
self.__derive_parameters()
# initialize the NEST kernel
self.__setup_nest()
def create(self):
""" Creates all network nodes.
Neuronal populations and recording and stimulating devices are created.
"""
self.__create_neuronal_populations()
if len(self.sim_dict['rec_dev']) > 0:
self.__create_recording_devices()
if self.net_dict['poisson_input']:
self.__create_poisson_bg_input()
if self.stim_dict['thalamic_input']:
self.__create_thalamic_stim_input()
if self.stim_dict['dc_input']:
self.__create_dc_stim_input()
def connect(self):
""" Connects the network.
Recurrent connections among neurons of the neuronal populations are
established, and recording and stimulating devices are connected.
The ``self.__connect_*()`` functions use ``nest.Connect()`` calls which
set up the postsynaptic connectivity.
Since the introduction of the 5g kernel in NEST 2.16.0 the full
connection infrastructure including presynaptic connectivity is set up
afterwards in the preparation phase of the simulation.
The preparation phase is usually induced by the first
``nest.Simulate()`` call.
For including this phase in measurements of the connection time,
we induce it here explicitly by calling ``nest.Prepare()``.
"""
self.__connect_neuronal_populations()
if len(self.sim_dict['rec_dev']) > 0:
self.__connect_recording_devices()
if self.net_dict['poisson_input']:
self.__connect_poisson_bg_input()
if self.stim_dict['thalamic_input']:
self.__connect_thalamic_stim_input()
if self.stim_dict['dc_input']:
self.__connect_dc_stim_input()
nest.Prepare()
nest.Cleanup()
def simulate(self, t_sim):
""" Simulates the microcircuit.
Parameters
----------
t_sim
Simulation time (in ms).
"""
if nest.Rank() == 0:
print('Simulating {} ms.'.format(t_sim))
nest.Simulate(t_sim)
def evaluate(self, raster_plot_interval, firing_rates_interval):
""" Displays simulation results.
Creates a spike raster plot.
Calculates the firing rate of each population and displays them as a
box plot.
Parameters
----------
raster_plot_interval
Times (in ms) to start and stop loading spike times for raster plot
(included).
firing_rates_interval
            Times (in ms) to start and stop loading spike times for computing
firing rates (included).
Returns
-------
None
"""
if nest.Rank() == 0:
print('Interval to plot spikes: {} ms'.format(raster_plot_interval))
helpers.plot_raster(
self.data_path,
'spike_detector',
raster_plot_interval[0],
raster_plot_interval[1],
self.net_dict['N_scaling'])
print('Interval to compute firing rates: {} ms'.format(
firing_rates_interval))
helpers.firing_rates(
self.data_path, 'spike_detector',
firing_rates_interval[0], firing_rates_interval[1])
helpers.boxplot(self.data_path, self.net_dict['populations'])
def __derive_parameters(self):
"""
Derives and adjusts parameters and stores them as class attributes.
"""
self.num_pops = len(self.net_dict['populations'])
# total number of synapses between neuronal populations before scaling
full_num_synapses = helpers.num_synapses_from_conn_probs(
self.net_dict['conn_probs'],
self.net_dict['full_num_neurons'],
self.net_dict['full_num_neurons'])
# scaled numbers of neurons and synapses
self.num_neurons = np.round((self.net_dict['full_num_neurons'] *
self.net_dict['N_scaling'])).astype(int)
self.num_synapses = np.round((full_num_synapses *
self.net_dict['N_scaling'] *
self.net_dict['K_scaling'])).astype(int)
self.ext_indegrees = np.round((self.net_dict['K_ext'] *
self.net_dict['K_scaling'])).astype(int)
# conversion from PSPs to PSCs
PSC_over_PSP = helpers.postsynaptic_potential_to_current(
self.net_dict['neuron_params']['C_m'],
self.net_dict['neuron_params']['tau_m'],
self.net_dict['neuron_params']['tau_syn'])
PSC_matrix_mean = self.net_dict['PSP_matrix_mean'] * PSC_over_PSP
PSC_ext = self.net_dict['PSP_exc_mean'] * PSC_over_PSP
# DC input compensates for potentially missing Poisson input
if self.net_dict['poisson_input']:
DC_amp = np.zeros(self.num_pops)
else:
if nest.Rank() == 0:
print('DC input compensates for missing Poisson input.\n')
DC_amp = helpers.dc_input_compensating_poisson(
self.net_dict['bg_rate'], self.net_dict['K_ext'],
self.net_dict['neuron_params']['tau_syn'],
PSC_ext)
# adjust weights and DC amplitude if the indegree is scaled
if self.net_dict['K_scaling'] != 1:
PSC_matrix_mean, PSC_ext, DC_amp = \
helpers.adjust_weights_and_input_to_synapse_scaling(
self.net_dict['full_num_neurons'],
full_num_synapses, self.net_dict['K_scaling'],
PSC_matrix_mean, PSC_ext,
self.net_dict['neuron_params']['tau_syn'],
self.net_dict['full_mean_rates'],
DC_amp,
self.net_dict['poisson_input'],
self.net_dict['bg_rate'], self.net_dict['K_ext'])
# store final parameters as class attributes
self.weight_matrix_mean = PSC_matrix_mean
self.weight_ext = PSC_ext
self.DC_amp = DC_amp
# thalamic input
if self.stim_dict['thalamic_input']:
num_th_synapses = helpers.num_synapses_from_conn_probs(
self.stim_dict['conn_probs_th'],
self.stim_dict['num_th_neurons'],
self.net_dict['full_num_neurons'])[0]
self.weight_th = self.stim_dict['PSP_th'] * PSC_over_PSP
if self.net_dict['K_scaling'] != 1:
num_th_synapses *= self.net_dict['K_scaling']
self.weight_th /= np.sqrt(self.net_dict['K_scaling'])
self.num_th_synapses = np.round(num_th_synapses).astype(int)
if nest.Rank() == 0:
message = ''
if self.net_dict['N_scaling'] != 1:
message += \
'Neuron numbers are scaled by a factor of {:.3f}.\n'.format(
self.net_dict['N_scaling'])
if self.net_dict['K_scaling'] != 1:
message += \
'Indegrees are scaled by a factor of {:.3f}.'.format(
self.net_dict['K_scaling'])
message += '\n Weights and DC input are adjusted to compensate.\n'
print(message)
def __setup_nest(self):
""" Initializes the NEST kernel.
Reset the NEST kernel and pass parameters to it.
The number of seeds for random number generation are computed based on
the total number of virtual processes
(number of MPI processes x number of threads per MPI process).
"""
nest.ResetKernel()
# set seeds for random number generation
nest.SetKernelStatus(
{'local_num_threads': self.sim_dict['local_num_threads']})
N_vp = nest.GetKernelStatus('total_num_virtual_procs')
master_seed = self.sim_dict['master_seed']
grng_seed = master_seed + N_vp
rng_seeds = (master_seed + N_vp + 1 + np.arange(N_vp)).tolist()
if nest.Rank() == 0:
print('Master seed: {} '.format(master_seed))
print(' Total number of virtual processes: {}'.format(N_vp))
print(' Global random number generator seed: {}'.format(grng_seed))
print(
' Seeds for random number generators of virtual processes: ' +
'{}'.format(rng_seeds))
# pass parameters to NEST kernel
self.sim_resolution = self.sim_dict['sim_resolution']
kernel_dict = {
'resolution': self.sim_resolution,
'grng_seed': grng_seed,
'rng_seeds': rng_seeds,
'overwrite_files': self.sim_dict['overwrite_files'],
'print_time': self.sim_dict['print_time']}
nest.SetKernelStatus(kernel_dict)
def __create_neuronal_populations(self):
""" Creates the neuronal populations.
The neuronal populations are created and the parameters are assigned
to them. The initial membrane potential of the neurons is drawn from
normal distributions dependent on the parameter ``V0_type``.
The first and last neuron id of each population is written to file.
"""
if nest.Rank() == 0:
print('Creating neuronal populations.')
self.pops = []
for i in np.arange(self.num_pops):
population = nest.Create(self.net_dict['neuron_model'],
self.num_neurons[i])
population.set(
tau_syn_ex=self.net_dict['neuron_params']['tau_syn'],
tau_syn_in=self.net_dict['neuron_params']['tau_syn'],
E_L=self.net_dict['neuron_params']['E_L'],
V_th=self.net_dict['neuron_params']['V_th'],
V_reset=self.net_dict['neuron_params']['V_reset'],
t_ref=self.net_dict['neuron_params']['t_ref'],
I_e=self.DC_amp[i])
if self.net_dict['V0_type'] == 'optimized':
population.set(V_m=nest.random.normal(
self.net_dict['neuron_params']['V0_mean']['optimized'][i],
self.net_dict['neuron_params']['V0_std']['optimized'][i]))
elif self.net_dict['V0_type'] == 'original':
population.set(V_m=nest.random.normal(
self.net_dict['neuron_params']['V0_mean']['original'],
self.net_dict['neuron_params']['V0_std']['original']))
else:
raise Exception(
'V0_type incorrect. ' +
'Valid options are "optimized" and "original".')
self.pops.append(population)
# write node ids to file
if nest.Rank() == 0:
fn = os.path.join(self.data_path, 'population_nodeids.dat')
with open(fn, 'w+') as f:
for pop in self.pops:
f.write('{} {}\n'.format(pop[0].global_id,
pop[-1].global_id))
def __create_recording_devices(self):
""" Creates one recording device of each kind per population.
Only devices which are given in ``sim_dict['rec_dev']`` are created.
"""
if nest.Rank() == 0:
print('Creating recording devices.')
if 'spike_detector' in self.sim_dict['rec_dev']:
if nest.Rank() == 0:
print(' Creating spike detectors.')
sd_dict = {'record_to': 'ascii',
'label': os.path.join(self.data_path, 'spike_detector')}
self.spike_detectors = nest.Create('spike_detector',
n=self.num_pops,
params=sd_dict)
if 'voltmeter' in self.sim_dict['rec_dev']:
if nest.Rank() == 0:
print(' Creating voltmeters.')
vm_dict = {'interval': self.sim_dict['rec_V_int'],
'record_to': 'ascii',
'record_from': ['V_m'],
'label': os.path.join(self.data_path, 'voltmeter')}
self.voltmeters = nest.Create('voltmeter',
n=self.num_pops,
params=vm_dict)
def __create_poisson_bg_input(self):
""" Creates the Poisson generators for ongoing background input if
specified in ``network_params.py``.
If ``poisson_input`` is ``False``, DC input is applied for compensation
in ``create_neuronal_populations()``.
"""
if nest.Rank() == 0:
print('Creating Poisson generators for background input.')
self.poisson_bg_input = nest.Create('poisson_generator',
n=self.num_pops)
self.poisson_bg_input.rate = \
self.net_dict['bg_rate'] * self.ext_indegrees
def __create_thalamic_stim_input(self):
""" Creates the thalamic neuronal population if specified in
``stim_dict``.
Thalamic neurons are of type ``parrot_neuron`` and receive input from a
Poisson generator.
Note that the number of thalamic neurons is not scaled with
``N_scaling``.
"""
if nest.Rank() == 0:
print('Creating thalamic input for external stimulation.')
self.thalamic_population = nest.Create(
'parrot_neuron', n=self.stim_dict['num_th_neurons'])
self.poisson_th = nest.Create('poisson_generator')
self.poisson_th.set(
rate=self.stim_dict['th_rate'],
start=self.stim_dict['th_start'],
stop=(self.stim_dict['th_start'] + self.stim_dict['th_duration']))
def __create_dc_stim_input(self):
""" Creates DC generators for external stimulation if specified
in ``stim_dict``.
The final amplitude is the ``stim_dict['dc_amp'] * net_dict['K_ext']``.
"""
dc_amp_stim = self.stim_dict['dc_amp'] * self.net_dict['K_ext']
if nest.Rank() == 0:
print('Creating DC generators for external stimulation.')
dc_dict = {'amplitude': dc_amp_stim,
'start': self.stim_dict['dc_start'],
'stop': (self.stim_dict['dc_start'] +
self.stim_dict['dc_dur'])}
self.dc_stim_input = nest.Create('dc_generator', n=self.num_pops,
params=dc_dict)
def __connect_neuronal_populations(self):
""" Creates the recurrent connections between neuronal populations. """
if nest.Rank() == 0:
print('Connecting neuronal populations recurrently.')
for i, target_pop in enumerate(self.pops):
for j, source_pop in enumerate(self.pops):
if self.num_synapses[i][j] >= 0.:
conn_dict_rec = {
'rule': 'fixed_total_number',
'N': self.num_synapses[i][j]}
if self.weight_matrix_mean[i][j] < 0:
w_min = np.NINF
w_max = 0.0
else:
w_min = 0.0
w_max = np.Inf
syn_dict = {
'synapse_model': 'static_synapse',
'weight': nest.math.redraw(
nest.random.normal(
mean=self.weight_matrix_mean[i][j],
std=abs(self.weight_matrix_mean[i][j] *
self.net_dict['weight_rel_std'])),
min=w_min,
max=w_max),
'delay': nest.math.redraw(
nest.random.normal(
mean=self.net_dict['delay_matrix_mean'][i][j],
std=(self.net_dict['delay_matrix_mean'][i][j] *
self.net_dict['delay_rel_std'])),
min=self.sim_resolution,
max=np.Inf)}
nest.Connect(
source_pop, target_pop,
conn_spec=conn_dict_rec,
syn_spec=syn_dict)
def __connect_recording_devices(self):
""" Connects the recording devices to the microcircuit."""
        if nest.Rank() == 0:
print('Connecting recording devices.')
for i, target_pop in enumerate(self.pops):
if 'spike_detector' in self.sim_dict['rec_dev']:
nest.Connect(target_pop, self.spike_detectors[i])
if 'voltmeter' in self.sim_dict['rec_dev']:
nest.Connect(self.voltmeters[i], target_pop)
def __connect_poisson_bg_input(self):
""" Connects the Poisson generators to the microcircuit."""
if nest.Rank() == 0:
print('Connecting Poisson generators for background input.')
for i, target_pop in enumerate(self.pops):
conn_dict_poisson = {'rule': 'all_to_all'}
syn_dict_poisson = {
'synapse_model': 'static_synapse',
'weight': self.weight_ext,
'delay': self.net_dict['delay_poisson']}
nest.Connect(
self.poisson_bg_input[i], target_pop,
conn_spec=conn_dict_poisson,
syn_spec=syn_dict_poisson)
def __connect_thalamic_stim_input(self):
""" Connects the thalamic input to the neuronal populations."""
if nest.Rank() == 0:
print('Connecting thalamic input.')
# connect Poisson input to thalamic population
nest.Connect(self.poisson_th, self.thalamic_population)
# connect thalamic population to neuronal populations
for i, target_pop in enumerate(self.pops):
conn_dict_th = {
'rule': 'fixed_total_number',
'N': self.num_th_synapses[i]}
syn_dict_th = {
'weight': nest.math.redraw(
nest.random.normal(
mean=self.weight_th,
std=self.weight_th * self.net_dict['weight_rel_std']),
min=0.0,
max=np.Inf),
'delay': nest.math.redraw(
nest.random.normal(
mean=self.stim_dict['delay_th_mean'],
std=(self.stim_dict['delay_th_mean'] *
self.stim_dict['delay_th_rel_std'])),
min=self.sim_resolution,
max=np.Inf)}
nest.Connect(
self.thalamic_population, target_pop,
conn_spec=conn_dict_th, syn_spec=syn_dict_th)
def __connect_dc_stim_input(self):
""" Connects the DC generators to the neuronal populations. """
if nest.Rank() == 0:
print('Connecting DC generators.')
for i, target_pop in enumerate(self.pops):
nest.Connect(self.dc_stim_input[i], target_pop)
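# --- Illustrative usage sketch (editor's addition, not part of the original
# --- file) ---
# Following the class docstring, a run script is expected to drive the
# microcircuit roughly like this (sim_dict, net_dict and stim_dict come from
# sim_params.py, network_params.py and stimulus_params.py):
#
#     net = Network(sim_dict, net_dict, stim_dict)
#     net.create()            # populations, recording devices, inputs
#     net.connect()           # recurrent and device connections + nest.Prepare()
#     net.simulate(1000.0)    # simulation time in ms
#     net.evaluate(raster_plot_interval=[500.0, 1000.0],
#                  firing_rates_interval=[500.0, 1000.0])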
|
weidel-p/nest-simulator
|
pynest/examples/Potjans_2014/network.py
|
Python
|
gpl-2.0
| 21,785
|
[
"NEURON"
] |
41be4ac70e42bc236a5887219274a39a3e3e0dc38c5d6c2b17f6ec3026b580f8
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from ._reportdialog import ReportDialog
from gramps.gen.plug.report import CATEGORY_WEB
#-------------------------------------------------------------------------
#
# WebReportDialog class
#
#-------------------------------------------------------------------------
class WebReportDialog(ReportDialog):
"""
The WebReportDialog base class. This is a base class for generating
dialogs for web page reports.
"""
def __init__(self, dbstate, uistate, option_class, name, trans_name):
"""Initialize a dialog"""
self.category = CATEGORY_WEB
ReportDialog.__init__(self, dbstate, uistate, option_class,
name, trans_name)
self.options.handler.set_format_name('html')
def setup_init(self):
pass
def setup_target_frame(self):
"""Target frame is not used."""
pass
def parse_target_frame(self):
"""Target frame is not used."""
return 1
|
pmghalvorsen/gramps_branch
|
gramps/gui/plug/report/_webreportdialog.py
|
Python
|
gpl-2.0
| 1,982
|
[
"Brian"
] |
c9140a3c7b3316801066cad9ecd9ffaa697582abe582dc896de94bfd66a96c8e
|
# Copyright (c) 2012-2014, Max Zwiessele, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
__doc__ = """
Inference over Gaussian process latent functions
In all our GP models, the consistency property means that we have a Gaussian
prior over a finite set of points f. This prior is
.. math:: N(f | 0, K)
where K is the kernel matrix.
We also have a likelihood (see GPy.likelihoods) which defines how the data are
related to the latent function: p(y | f). If the likelihood is also a Gaussian,
the inference over f is tractable (see exact_gaussian_inference.py).
If the likelihood object is something other than Gaussian, then exact inference
is not tractable. We then resort to a Laplace approximation (laplace.py) or
expectation propagation (ep.py).
The inference methods return a
:class:`~GPy.inference.latent_function_inference.posterior.Posterior`
instance, which is a simple
structure which contains a summary of the posterior. The model classes can then
use this posterior object for making predictions, optimizing hyper-parameters,
etc.
"""
class LatentFunctionInference(object):
def on_optimization_start(self):
"""
        This function gets called just before the optimization loop starts.
"""
pass
def on_optimization_end(self):
"""
        This function gets called just after the optimization loop has ended.
"""
pass
class InferenceMethodList(LatentFunctionInference, list):
def on_optimization_start(self):
for inf in self:
inf.on_optimization_start()
def on_optimization_end(self):
for inf in self:
inf.on_optimization_end()
def __getstate__(self):
state = []
for inf in self:
state.append(inf)
return state
def __setstate__(self, state):
for inf in state:
self.append(inf)
from .exact_gaussian_inference import ExactGaussianInference
from .laplace import Laplace,LaplaceBlock
from GPy.inference.latent_function_inference.var_dtc import VarDTC
from .expectation_propagation import EP
from .expectation_propagation_dtc import EPDTC
from .dtc import DTC
from .fitc import FITC
from .var_dtc_parallel import VarDTC_minibatch
from .var_gauss import VarGauss
# class FullLatentFunctionData(object):
#
#
# class EMLikeLatentFunctionInference(LatentFunctionInference):
# def update_approximation(self):
# """
# This function gets called when the
# """
#
# def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
# """
# Do inference on the latent functions given a covariance function `kern`,
# inputs and outputs `X` and `Y`, inducing_inputs `Z`, and a likelihood `likelihood`.
# Additional metadata for the outputs `Y` can be given in `Y_metadata`.
# """
# raise NotImplementedError, "Abstract base class for full inference"
#
# class VariationalLatentFunctionInference(LatentFunctionInference):
# def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
# """
# Do inference on the latent functions given a covariance function `kern`,
# inputs and outputs `X` and `Y`, inducing_inputs `Z`, and a likelihood `likelihood`.
# Additional metadata for the outputs `Y` can be given in `Y_metadata`.
# """
# raise NotImplementedError, "Abstract base class for full inference"
|
beckdaniel/GPy
|
GPy/inference/latent_function_inference/__init__.py
|
Python
|
bsd-3-clause
| 3,440
|
[
"Gaussian"
] |
22f32d9750a7be3d23797f752353c9122015a7e05e80f994ea08d426c63896ac
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Visit.mobile'
db.add_column(u'clinics_visit', 'mobile',
self.gf('django.db.models.fields.CharField')(default='', max_length=11, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Visit.mobile'
db.delete_column(u'clinics_visit', 'mobile')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'clinics.clinic': {
'Meta': {'object_name': 'Clinic'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_renovated': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'lga_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'pbf_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'year_opened': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.clinicstaff': {
'Meta': {'object_name': 'ClinicStaff'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'staff_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'year_started': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.clinicstatistic': {
'Meta': {'unique_together': "[('clinic', 'statistic', 'month')]", 'object_name': 'ClinicStatistic'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'float_value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'int_value': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'month': ('django.db.models.fields.DateField', [], {}),
'rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'statistic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['statistics.Statistic']"}),
'text_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'clinics.genericfeedback': {
'Meta': {'object_name': 'GenericFeedback'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'message_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'clinics.patient': {
'Meta': {'object_name': 'Patient'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'serial': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'clinics.region': {
'Meta': {'unique_together': "(('external_id', 'type'),)", 'object_name': 'Region'},
'alternate_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
'external_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'lga'", 'max_length': '16'})
},
u'clinics.service': {
'Meta': {'object_name': 'Service'},
'code': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'clinics.visit': {
'Meta': {'object_name': 'Visit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Patient']"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'staff': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.ClinicStaff']", 'null': 'True', 'blank': 'True'}),
'survey_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'visit_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 7, 6, 0, 0)'}),
'welcome_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'clinics.visitregistrationerror': {
'Meta': {'object_name': 'VisitRegistrationError'},
'error_type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'clinics.visitregistrationerrorlog': {
'Meta': {'object_name': 'VisitRegistrationErrorLog'},
'error_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'message_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'statistics.statistic': {
'Meta': {'object_name': 'Statistic'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['statistics.StatisticGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'statistic_type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
u'statistics.statisticgroup': {
'Meta': {'object_name': 'StatisticGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
}
}
complete_apps = ['clinics']
|
myvoice-nigeria/myvoice
|
myvoice/clinics/migrations/0025_auto__add_field_visit_mobile.py
|
Python
|
bsd-2-clause
| 14,087
|
[
"VisIt"
] |
d1befe8315f4838db41b3a1c1b38d36cda02700d076903ce20e929c06215bb83
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
import numpy as np, os
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
from horton.io.test.common import compute_mulliken_charges
from horton.test.common import tmpdir, compare_mols
def test_load_molden_li2_orca():
fn_molden = context.get_fn('test/li2.molden.input')
mol = IOData.from_file(fn_molden)
    # Check the title
assert mol.title == 'Molden file created by orca_2mkl for BaseName=li2'
# Check normalization
olp = mol.obasis.compute_overlap()
mol.orb_alpha.check_normalization(olp, 1e-5)
mol.orb_beta.check_normalization(olp, 1e-5)
# Check Mulliken charges
dm_full = mol.get_dm_full()
charges = compute_mulliken_charges(mol.obasis, mol.pseudo_numbers, dm_full)
expected_charges = np.array([0.5, 0.5])
assert abs(charges - expected_charges).max() < 1e-5
def test_load_molden_h2o_orca():
fn_molden = context.get_fn('test/h2o.molden.input')
mol = IOData.from_file(fn_molden)
    # Check the title
assert mol.title == 'Molden file created by orca_2mkl for BaseName=h2o'
# Check normalization
olp = mol.obasis.compute_overlap()
mol.orb_alpha.check_normalization(olp, 1e-5)
# Check Mulliken charges
dm_full = mol.get_dm_full()
charges = compute_mulliken_charges(mol.obasis, mol.pseudo_numbers, dm_full)
expected_charges = np.array([-0.816308, 0.408154, 0.408154])
assert abs(charges - expected_charges).max() < 1e-5
def test_load_molden_nh3_molden_pure():
# The file tested here is created with molden. It should be read in
# properly without altering normalization and sign conventions.
fn_molden = context.get_fn('test/nh3_molden_pure.molden')
mol = IOData.from_file(fn_molden)
# Check Mulliken charges. Comparison with numbers from the Molden program output.
dm_full = mol.get_dm_full()
charges = compute_mulliken_charges(mol.obasis, mol.pseudo_numbers, dm_full)
molden_charges = np.array([0.0381, -0.2742, 0.0121, 0.2242])
assert abs(charges - molden_charges).max() < 1e-3
def test_load_molden_nh3_molden_cart():
# The file tested here is created with molden. It should be read in
# properly without altering normalization and sign conventions.
fn_molden = context.get_fn('test/nh3_molden_cart.molden')
mol = IOData.from_file(fn_molden)
# Check Mulliken charges. Comparison with numbers from the Molden program output.
dm_full = mol.get_dm_full()
charges = compute_mulliken_charges(mol.obasis, mol.pseudo_numbers, dm_full)
print charges
molden_charges = np.array([0.3138, -0.4300, -0.0667, 0.1829])
assert abs(charges - molden_charges).max() < 1e-3
def test_load_molden_nh3_orca():
# The file tested here is created with ORCA. It should be read in
# properly by altering normalization and sign conventions.
fn_molden = context.get_fn('test/nh3_orca.molden')
mol = IOData.from_file(fn_molden)
# Check Mulliken charges. Comparison with numbers from the Molden program output.
dm_full = mol.get_dm_full()
charges = compute_mulliken_charges(mol.obasis, mol.pseudo_numbers, dm_full)
molden_charges = np.array([0.0381, -0.2742, 0.0121, 0.2242])
assert abs(charges - molden_charges).max() < 1e-3
def test_load_molden_nh3_psi4():
# The file tested here is created with PSI4 (pre 1.0). It should be read in
# properly by altering normalization conventions.
fn_molden = context.get_fn('test/nh3_psi4.molden')
mol = IOData.from_file(fn_molden)
# Check Mulliken charges. Comparison with numbers from the Molden program output.
dm_full = mol.get_dm_full()
charges = compute_mulliken_charges(mol.obasis, mol.pseudo_numbers, dm_full)
molden_charges = np.array([0.0381, -0.2742, 0.0121, 0.2242])
assert abs(charges - molden_charges).max() < 1e-3
def test_load_molden_nh3_psi4_1():
# The file tested here is created with PSI4 (version 1.0). It should be read in
# properly by renormalizing the contractions.
fn_molden = context.get_fn('test/nh3_psi4_1.0.molden')
mol = IOData.from_file(fn_molden)
# Check Mulliken charges. Comparison with numbers from the Molden program output.
dm_full = mol.get_dm_full()
charges = compute_mulliken_charges(mol.obasis, mol.pseudo_numbers, dm_full)
molden_charges = np.array([0.0381, -0.2742, 0.0121, 0.2242])
assert abs(charges - molden_charges).max() < 1e-3
def test_load_molden_he2_ghost_psi4_1():
# The file tested here is created with PSI4 (version 1.0). It should be read in
# properly by ignoring the ghost atoms.
fn_molden = context.get_fn('test/he2_ghost_psi4_1.0.molden')
mol = IOData.from_file(fn_molden)
np.testing.assert_equal(mol.pseudo_numbers, np.array([2.0]))
# Check Mulliken charges. Comparison with numbers from the Molden program output.
dm_full = mol.get_dm_full()
charges = compute_mulliken_charges(mol.obasis, np.array([0.0, 2.0]), dm_full)
molden_charges = np.array([-0.0041, 0.0041])
assert abs(charges - molden_charges).max() < 5e-4
def test_load_molden_nh3_molpro2012():
# The file tested here is created with MOLPRO2012.
fn_molden = context.get_fn('test/nh3_molpro2012.molden')
mol = IOData.from_file(fn_molden)
# Check Mulliken charges. Comparison with numbers from the Molden program output.
dm_full = mol.get_dm_full()
charges = compute_mulliken_charges(mol.obasis, mol.pseudo_numbers, dm_full)
molden_charges = np.array([0.0381, -0.2742, 0.0121, 0.2242])
assert abs(charges - molden_charges).max() < 1e-3
def test_load_molden_neon_turbomole():
# The file tested here is created with Turbomole 7.1.
fn_molden = context.get_fn('test/neon_turbomole_def2-qzvp.molden')
mol = IOData.from_file(fn_molden)
# Check Mulliken charges.
dm_full = mol.get_dm_full()
charges = compute_mulliken_charges(mol.obasis, mol.pseudo_numbers, dm_full)
assert abs(charges).max() < 1e-3
def test_load_molden_nh3_turbomole():
# The file tested here is created with Turbomole 7.1
fn_molden = context.get_fn('test/nh3_turbomole.molden')
mol = IOData.from_file(fn_molden)
# Check Mulliken charges. Comparison with numbers from the Turbomole output. These
# are slightly different than in the other tests because we are using Cartesian
# functions.
dm_full = mol.get_dm_full()
charges = compute_mulliken_charges(mol.obasis, mol.pseudo_numbers, dm_full)
molden_charges = np.array([0.03801, -0.27428, 0.01206, 0.22421])
assert abs(charges - molden_charges).max() < 1e-3
def check_load_dump_consistency(fn):
mol1 = IOData.from_file(context.get_fn(os.path.join('test', fn)))
with tmpdir('horton.io.test.test_molden.check_load_dump_consistency.%s' % fn) as dn:
fn_tmp = os.path.join(dn, 'foo.molden')
mol1.to_file(fn_tmp)
mol2 = IOData.from_file(fn_tmp)
compare_mols(mol1, mol2)
def test_load_dump_consistency_h2o():
check_load_dump_consistency('h2o.molden.input')
def test_load_dump_consistency_li2():
check_load_dump_consistency('li2.molden.input')
def test_load_dump_consistency_f():
check_load_dump_consistency('F.molden')
def test_load_dump_consistency_nh3_molden_pure():
check_load_dump_consistency('nh3_molden_pure.molden')
def test_load_dump_consistency_nh3_molden_cart():
check_load_dump_consistency('nh3_molden_cart.molden')
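# --- Illustrative sketch, not part of the original test suite: each test above repeats
# the same load -> density matrix -> Mulliken charges steps. A small helper capturing
# that pattern (relying on the imports at the top of this module) could look like this.
def _mulliken_charges_from_molden(fn_molden):
    """Load a molden file and return the Mulliken charges of its full density matrix."""
    mol = IOData.from_file(fn_molden)
    dm_full = mol.get_dm_full()
    return compute_mulliken_charges(mol.obasis, mol.pseudo_numbers, dm_full)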
|
QuantumElephant/horton
|
horton/io/test/test_molden.py
|
Python
|
gpl-3.0
| 8,205
|
[
"ORCA",
"Psi4",
"TURBOMOLE"
] |
28dc8733a8b41184a7094abb51eeeebc97e4c5d98dcd4328f9d9972351b42806
|
""" DIRAC FileCatalog mix-in class to manage users and groups within the FC database
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import six
import time
import threading
from DIRAC import S_OK, S_ERROR, gConfig, gLogger
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.UserGroupManager.UserAndGroupManagerBase \
import UserAndGroupManagerBase
class UserAndGroupManagerDB(UserAndGroupManagerBase):
def getUserAndGroupID(self, credDict):
""" Get a uid, gid tuple for the given Credentials """
# Get the user
s_uid = credDict.get('username', 'anon')
res = self.getUserID(s_uid)
if not res['OK']:
return res
uid = res['Value']
# Get the group (create it if it doesn't exist)
s_gid = credDict.get('group', 'anon')
res = self.getGroupID(s_gid)
if not res['OK']:
return res
gid = res['Value']
return S_OK((uid, gid))
#####################################################################
#
# User related methods
#
#####################################################################
def getUserID(self, user):
""" Get ID for a user specified by its name """
if isinstance(user, six.integer_types):
return S_OK(user)
if user in self.db.users.keys():
return S_OK(self.db.users[user])
return self.__addUser(user)
def addUser(self, uname):
""" Add a new user with a name 'uname' """
return self.getUserID(uname)
def getUsers(self):
# self.__refreshUsers()
return S_OK(self.db.users)
def findUser(self, user):
return self.getUserID(user)
def getUserName(self, uid):
""" Get user name for the given id """
if uid in self.db.uids.keys():
return S_OK(self.db.uids[uid])
return S_ERROR('User id %d not found' % uid)
def deleteUser(self, uname, force=True):
""" Delete a user specified by its name """
# ToDo: Check first if there are files belonging to the user
if not force:
pass
return self.__removeUser(uname)
def __addUser(self, uname):
startTime = time.time()
self.lock.acquire()
waitTime = time.time()
gLogger.debug("UserGroupManager AddUser lock created. Waited %.3f seconds. %s" % (waitTime - startTime, uname))
if uname in self.db.users.keys():
uid = self.db.users[uname]
gLogger.debug("UserGroupManager AddUser lock released. Used %.3f seconds. %s" % (time.time() - waitTime, uname))
self.lock.release()
return S_OK(uid)
res = self.db.insertFields('FC_Users', ['UserName'], [uname])
if not res['OK']:
gLogger.debug("UserGroupManager AddUser lock released. Used %.3f seconds. %s" % (time.time() - waitTime, uname))
self.lock.release()
if "Duplicate entry" in res['Message']:
result = self._refreshUsers()
if not result['OK']:
return result
if uname in self.db.users.keys():
uid = self.db.users[uname]
return S_OK(uid)
return res
uid = res['lastRowId']
self.db.uids[uid] = uname
self.db.users[uname] = uid
gLogger.debug("UserGroupManager AddUser lock released. Used %.3f seconds. %s" % (time.time() - waitTime, uname))
self.lock.release()
return S_OK(uid)
def __removeUser(self, uname):
startTime = time.time()
self.lock.acquire()
waitTime = time.time()
gLogger.debug("UserGroupManager RemoveUser lock created. Waited %.3f seconds. %s" % (waitTime - startTime, uname))
uid = self.db.users.get(uname, 'Missing')
req = "DELETE FROM FC_Users WHERE UserName='%s'" % uname
res = self.db._update(req)
if not res['OK']:
gLogger.debug(
"UserGroupManager RemoveUser lock released. Used %.3f seconds. %s" %
(time.time() - waitTime, uname))
self.lock.release()
return res
if uid != 'Missing':
self.db.users.pop(uname)
self.db.uids.pop(uid)
gLogger.debug("UserGroupManager RemoveUser lock released. Used %.3f seconds. %s" % (time.time() - waitTime, uname))
self.lock.release()
return S_OK()
def _refreshUsers(self):
""" Get the current user IDs and names """
startTime = time.time()
self.lock.acquire()
waitTime = time.time()
gLogger.debug("UserGroupManager RefreshUsers lock created. Waited %.3f seconds." % (waitTime - startTime))
req = "SELECT UID,UserName from FC_Users"
res = self.db._query(req)
if not res['OK']:
gLogger.debug("UserGroupManager RefreshUsers lock released. Used %.3f seconds." % (time.time() - waitTime))
self.lock.release()
return res
self.db.users = {}
self.db.uids = {}
for uid, uname in res['Value']:
self.db.users[uname] = uid
self.db.uids[uid] = uname
gLogger.debug("UserGroupManager RefreshUsers lock released. Used %.3f seconds." % (time.time() - waitTime))
self.lock.release()
return S_OK()
#####################################################################
#
# Group related methods
#
def getGroupID(self, group):
""" Get ID for a group specified by its name """
if isinstance(group, six.integer_types):
return S_OK(group)
if group in self.db.groups.keys():
return S_OK(self.db.groups[group])
return self.__addGroup(group)
def addGroup(self, gname):
""" Add a new group with a name 'name' """
return self.getGroupID(gname)
def getGroups(self):
# self.__refreshGroups()
return S_OK(self.db.groups)
def findGroup(self, group):
return self.getGroupID(group)
def getGroupName(self, gid):
""" Get group name for the given id """
if gid in self.db.gids.keys():
return S_OK(self.db.gids[gid])
return S_ERROR('Group id %d not found' % gid)
def deleteGroup(self, gname, force=True):
""" Delete a group specified by its name """
if not force:
# ToDo: Check first if there are files belonging to the group
pass
return self.__removeGroup(gname)
def __addGroup(self, group):
startTime = time.time()
self.lock.acquire()
waitTime = time.time()
gLogger.debug("UserGroupManager AddGroup lock created. Waited %.3f seconds. %s" % (waitTime - startTime, group))
if group in self.db.groups.keys():
gid = self.db.groups[group]
gLogger.debug("UserGroupManager AddGroup lock released. Used %.3f seconds. %s" % (time.time() - waitTime, group))
self.lock.release()
return S_OK(gid)
res = self.db.insertFields('FC_Groups', ['GroupName'], [group])
if not res['OK']:
gLogger.debug("UserGroupManager AddGroup lock released. Used %.3f seconds. %s" % (time.time() - waitTime, group))
self.lock.release()
if "Duplicate entry" in res['Message']:
result = self._refreshGroups()
if not result['OK']:
return result
if group in self.db.groups.keys():
gid = self.db.groups[group]
return S_OK(gid)
return res
gid = res['lastRowId']
self.db.gids[gid] = group
self.db.groups[group] = gid
gLogger.debug("UserGroupManager AddGroup lock released. Used %.3f seconds. %s" % (time.time() - waitTime, group))
self.lock.release()
return S_OK(gid)
def __removeGroup(self, group):
startTime = time.time()
self.lock.acquire()
waitTime = time.time()
gLogger.debug("UserGroupManager RemoveGroup lock created. Waited %.3f seconds. %s" % (waitTime - startTime, group))
gid = self.db.groups.get(group, 'Missing')
req = "DELETE FROM FC_Groups WHERE GroupName='%s'" % group
res = self.db._update(req)
if not res['OK']:
gLogger.debug(
"UserGroupManager RemoveGroup lock released. Used %.3f seconds. %s" %
(time.time() - waitTime, group))
self.lock.release()
return res
if gid != 'Missing':
self.db.groups.pop(group)
self.db.gids.pop(gid)
gLogger.debug("UserGroupManager RemoveGroup lock released. Used %.3f seconds. %s" % (time.time() - waitTime,
group))
self.lock.release()
return S_OK()
def _refreshGroups(self):
""" Get the current group IDs and names """
req = "SELECT GID,GroupName from FC_Groups"
startTime = time.time()
self.lock.acquire()
waitTime = time.time()
gLogger.debug("UserGroupManager RefreshGroups lock created. Waited %.3f seconds." % (waitTime - startTime))
res = self.db._query(req)
if not res['OK']:
gLogger.debug("UserGroupManager RefreshGroups lock released. Used %.3f seconds." % (time.time() - waitTime))
self.lock.release()
return res
self.db.groups = {}
self.db.gids = {}
for gid, gname in res['Value']:
self.db.groups[gname] = gid
self.db.gids[gid] = gname
gLogger.debug("UserGroupManager RefreshGroups lock released. Used %.3f seconds." % (time.time() - waitTime))
self.lock.release()
return S_OK()
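# --- Illustrative sketch, not part of the original DIRAC module: __addUser and
# __addGroup above share the same pattern: check the in-memory cache under a lock,
# insert into the FC table, and on a "Duplicate entry" error refresh the cache and
# retry the lookup. The toy class below condenses that pattern with a plain dict in
# place of the database table (names are hypothetical; it reuses the module-level
# 'threading' import).
class NameIdCacheSketch(object):
  """ Toy name -> id cache filled on demand under a lock """

  def __init__(self):
    self.lock = threading.Lock()
    self.names = {}
    self.nextId = 1

  def getOrAddId(self, name):
    self.lock.acquire()
    try:
      if name in self.names:
        return self.names[name]
      # In UserAndGroupManagerDB this step is an INSERT into FC_Users/FC_Groups;
      # a "Duplicate entry" error would trigger a cache refresh instead.
      newId = self.nextId
      self.nextId += 1
      self.names[name] = newId
      return newId
    finally:
      self.lock.release()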
|
yujikato/DIRAC
|
src/DIRAC/DataManagementSystem/DB/FileCatalogComponents/UserGroupManager/UserAndGroupManagerDB.py
|
Python
|
gpl-3.0
| 8,952
|
[
"DIRAC"
] |
ab84be12a275a74f33bdc2cd77c4ca57d7c9f1d067e78dc2165d0ddbaff52a4a
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
This module provides the Plugin class for export plugins.
"""
from . import Plugin
class ExportPlugin(Plugin):
"""
This class represents a plugin for exporting data from Gramps
"""
def __init__(self, name, description, export_function,
extension, config=None):
"""
@param name: A friendly name to call this plugin.
Example: "GEDCOM Export"
@type name: string
@param description: A short description of the plugin.
Example: "This plugin will export a GEDCOM file from database"
@type description: string
@param export_function: A function to call to perform the export.
The function must take the form:
def export_function(database, filename, user, option_box):
where:
"db" is a Gramps database to import the data into
"filename" is the file that the data will be exported to
"user" provides UI output (callback, errors, etc)
@type export_function: callable
@param extension: The extension for the output file.
Example: "ged"
@type extension: str
@param config: Options for the exporter
@type config: tuple (??,??)
@return: nothing
"""
Plugin.__init__(self, name, description, export_function.__module__)
self.__export_func = export_function
self.__extension = extension
self.__config = config
def get_export_function(self):
"""
Get the export function for this plugin.
@return: the callable export_function passed into __init__
"""
return self.__export_func
def get_extension(self):
"""
Get the file extension for the export file.
@return: str
"""
return self.__extension
def get_config(self):
"""
Get the config.
@return: (??,??)
"""
return self.__config
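# --- Illustrative sketch, not part of the original module: a minimal example of the
# contract described in the __init__ docstring above. The function body, plugin name
# and file extension below are hypothetical placeholders, not a real Gramps exporter.
def _example_export_function(database, filename, user, option_box):
    """Write 'database' to 'filename'; 'user' provides UI callbacks and errors."""
    with open(filename, 'w') as handle:
        handle.write("illustrative export only\n")
    return True

_example_plugin = ExportPlugin(name="Example Export",
                               description="Illustrative export plugin sketch",
                               export_function=_example_export_function,
                               extension="txt")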
|
Forage/Gramps
|
gramps/gen/plug/_export.py
|
Python
|
gpl-2.0
| 2,836
|
[
"Brian"
] |
55ee33f7fcf720346f598fb8c08939d96b5e87b1d17fcef90fe1f10ced99e339
|
from mypyli import blastparser
from Bio import SeqIO
import argparse
import logging
import sys
def get_seq_ids(fasta_f):
""" Returns a list of sequence headers and makes sure they are all unique."""
ids = []
with open(fasta_f, 'r') as IN:
for seq in SeqIO.parse(IN, 'fasta'):
ids.append(seq.id)
if len(ids) != len(set(ids)):
raise AssertionError("One or more sequence headers are duplicates!")
else:
return ids
def get_top_hits_from_blast(blast_f, max_e, min_id):
top_hits = {}
logging.info(blast_f)
with open(blast_f, 'r') as IN:
for record in blastparser.parse(IN, outfmt='6'):
if record.perc_id >= min_id and record.evalue <= max_e:
                # keep the best (lowest e-value) hit seen so far for each query
                prev_hit = top_hits.get(record.query)
                if prev_hit is None or record.evalue < prev_hit[1]:
                    top_hits[record.query] = (record.subject, record.evalue)
return top_hits
def print_taxonomy_assignments(out_f, seq_ids, hits, taxonomy_assignments):
with open(out_f, 'w') as OUT:
for seq in seq_ids:
if seq in hits:
taxonomy = taxonomy_assignments[hits[seq][0]]
evalue = hits[seq][1]
else:
taxonomy = "Unassigned"
evalue = 0
OUT.write("\t".join([seq, taxonomy, str(evalue)]) + "\n")
def get_taxonomy(seq2tax_f, seq_ids):
taxonomy = {}
with open(seq2tax_f, 'r', errors='ignore') as IN:
for line in IN:
seq, tax = line[:-1].split("\t")
#print((seq, tax))
if seq in seq_ids:
taxonomy[seq] = tax
return taxonomy
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="This is an implementation of QIIME's assign_taxonomy.py -m blast pipeline. This is required because the QIIME BLAST wrapper required legacy BLAST and all we have is BLAST+.")
parser.add_argument("-e", type=float, help="maximum E-value to consider (default: 1E-30)", default=1E-30)
parser.add_argument("-id", type=float, help="minimum identity to consider (default: 90)", default=90.0)
parser.add_argument("-t", help="tab-delimited file that maps sequences to taxonomy", required=True)
parser.add_argument("-r", help="FASTA file of reference sequences", required=True)
parser.add_argument("-input", "-i", help="input FASTA file", required=True)
parser.add_argument("-out", help="output classifications file", default="blast_taxonomy_classifications.txt")
parser.add_argument("-b", help="BLAST output file in outfmt 6")
args = parser.parse_args()
# turn id into a percent if necessary
if args.id < 1:
args.id *= 100
# run the BLAST if needed
if not args.b:
raise AssertionError("Currently, you must feed the program a BLAST file!")
#get_taxonomy(args.t, [])
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.info("Getting seq ids from input...")
seq_ids = get_seq_ids(args.input)
logging.info("Getting top hit from each id...")
hits = get_top_hits_from_blast(args.b, args.e, args.id)
logging.info("Looking up taxonomy for top hits...")
taxonomy_assignments = get_taxonomy(args.t, set([hits[hit][0] for hit in hits]))
logging.info("Printing results to {}".format(args.out))
print_taxonomy_assignments(args.out, seq_ids, hits, taxonomy_assignments)
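# --- Illustrative sketch, not part of the original script: the same pipeline driven
# programmatically, mirroring the command-line block above. The caller supplies the
# (placeholder) file paths; thresholds default to the script's own defaults.
def assign_taxonomy(input_fasta, blast_f, seq2tax_f, out_f, max_e=1E-30, min_id=90.0):
    """ Run get_seq_ids -> get_top_hits_from_blast -> get_taxonomy -> print_taxonomy_assignments. """
    seq_ids = get_seq_ids(input_fasta)
    hits = get_top_hits_from_blast(blast_f, max_e, min_id)
    taxonomy_assignments = get_taxonomy(seq2tax_f, set([hits[hit][0] for hit in hits]))
    print_taxonomy_assignments(out_f, seq_ids, hits, taxonomy_assignments)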
|
hunter-cameron/Bioinformatics
|
job_scripts/tati/implement_qiime_blast_taxonomy.py
|
Python
|
mit
| 3,420
|
[
"BLAST"
] |
2c4c4dfbb2ff577ffe52948907025322d7bdc9afe2225e3d8be96ac426eb4ccd
|
# (c) 2015, Brian Coca <bcoca@ansible.com>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: url
author: Brian Coca (@bcoca)
version_added: "1.9"
short_description: return contents from URL
description:
- Returns the content of the URL requested to be used as data in play.
options:
_terms:
description: urls to query
validate_certs:
description: Flag to control SSL certificate validation
type: boolean
default: True
split_lines:
description: Flag to control if content is returned as a list of lines or as a single text blob
type: boolean
default: True
use_proxy:
description: Flag to control if the lookup will observe HTTP proxy environment variables when present.
type: boolean
default: True
username:
description: Username to use for HTTP authentication.
type: string
default: None
version_added: "2.8"
password:
description: Password to use for HTTP authentication.
type: string
default: None
version_added: "2.8"
headers:
description: HTTP request headers
type: dictionary
default: {}
version_added: "2.9"
"""
EXAMPLES = """
- name: url lookup splits lines by default
debug: msg="{{item}}"
loop: "{{ lookup('url', 'https://github.com/gremlin.keys', wantlist=True) }}"
- name: display ip ranges
debug: msg="{{ lookup('url', 'https://ip-ranges.amazonaws.com/ip-ranges.json', split_lines=False) }}"
- name: url lookup using authentication
debug: msg="{{ lookup('url', 'https://some.private.site.com/file.txt', username='bob', password='hunter2') }}"
- name: url lookup using headers
debug: msg="{{ lookup('url', 'https://some.private.site.com/api/service', headers={'header1':'value1', 'header2':'value2'} ) }}"
"""
RETURN = """
_list:
description: list of list of lines or content of url(s)
"""
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.plugins.lookup import LookupBase
from ansible.utils.display import Display
display = Display()
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
self.set_options(direct=kwargs)
ret = []
for term in terms:
display.vvvv("url lookup connecting to %s" % term)
try:
response = open_url(term, validate_certs=self.get_option('validate_certs'),
use_proxy=self.get_option('use_proxy'),
url_username=self.get_option('username'),
url_password=self.get_option('password'),
headers=self.get_option('headers'))
except HTTPError as e:
raise AnsibleError("Received HTTP error for %s : %s" % (term, to_native(e)))
except URLError as e:
raise AnsibleError("Failed lookup url for %s : %s" % (term, to_native(e)))
except SSLValidationError as e:
raise AnsibleError("Error validating the server's certificate for %s: %s" % (term, to_native(e)))
except ConnectionError as e:
raise AnsibleError("Error connecting to %s: %s" % (term, to_native(e)))
if self.get_option('split_lines'):
for line in response.read().splitlines():
ret.append(to_text(line))
else:
ret.append(to_text(response.read()))
return ret
|
aperigault/ansible
|
lib/ansible/plugins/lookup/url.py
|
Python
|
gpl-3.0
| 3,817
|
[
"Brian"
] |
36027350b0794648148206af5e78be10266c599325b28cd640fc2aa638cb5edf
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-01-29 09:38
from __future__ import unicode_literals
import django.db.models.deletion
import enumfields.fields
import filer.fields.image
from decimal import Decimal
from django.conf import settings
from django.db import migrations, models
import shuup.core.fields
import shuup.core.models
import shuup.core.models._service_payment
import shuup.core.models._shops
import shuup.core.models._units
class Migration(migrations.Migration):
dependencies = [
('shuup', '0067_supplier_name_max_length_to_128'),
]
operations = [
migrations.AlterField(
model_name='attribute',
name='searchable',
field=models.BooleanField(default=True, help_text='Searchable attributes will be used for product lookup when customers search in your store.', verbose_name='searchable'),
),
migrations.AlterField(
model_name='category',
name='image',
field=filer.fields.image.FilerImageField(blank=True, help_text='Category image. Will be shown in places defined by the graphical theme in use.', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.FILER_IMAGE_MODEL, verbose_name='image'),
),
migrations.AlterField(
model_name='category',
name='ordering',
field=models.IntegerField(default=0, help_text='You can assign numerical values to images to tell the order in which they shall be displayed on the vendor page. You can also use the `Organize` button in the list view to order them visually with a drag-and-drop.', verbose_name='ordering'),
),
migrations.AlterField(
model_name='category',
name='status',
field=enumfields.fields.EnumIntegerField(db_index=True, default=1, enum=shuup.core.models.CategoryStatus, help_text='Choose if you want this category to be visible in your store.', verbose_name='status'),
),
migrations.AlterField(
model_name='category',
name='visibility',
field=enumfields.fields.EnumIntegerField(db_index=True, default=1, enum=shuup.core.models.CategoryVisibility, help_text='You can choose to limit who sees your category based on whether they are logged in or if they are part of a certain customer group.', verbose_name='visibility limitations'),
),
migrations.AlterField(
model_name='category',
name='visibility_groups',
field=models.ManyToManyField(blank=True, help_text='Select the customer groups you want to see this category. There are three groups created by default: Company, Person, Anonymous. In addition you can also define custom groups by searching for `Contact Groups`.', related_name='visible_categories', to='shuup.ContactGroup', verbose_name='visible for groups'),
),
migrations.AlterField(
model_name='category',
name='visible_in_menu',
field=models.BooleanField(default=True, help_text="Enable if this category should be visible in the store front's menu.", verbose_name='visible in menu'),
),
migrations.AlterField(
model_name='categorytranslation',
name='name',
field=models.CharField(db_index=True, help_text='Enter a descriptive name for your product category. Products can be found in the store front under the defined product category either directly in menus or while searching.', max_length=128, verbose_name='name'),
),
migrations.AlterField(
model_name='categorytranslation',
name='slug',
field=models.SlugField(blank=True, help_text='Enter a URL slug for your category. Slug is user- and search engine-friendly short text used in a URL to identify and describe a resource. In this case it will determine what your product category page URL in the browser address bar will look like. A default will be created using the category name.', null=True, verbose_name='slug'),
),
migrations.AlterField(
model_name='contact',
name='is_active',
field=models.BooleanField(db_index=True, default=True, help_text='Enable this if the contact is an active customer.', verbose_name='active'),
),
migrations.AlterField(
model_name='contact',
name='marketing_permission',
field=models.BooleanField(default=False, help_text='Enable this if the contact can receive marketing and promotional materials.', verbose_name='marketing permission'),
),
migrations.AlterField(
model_name='contact',
name='tax_group',
field=models.ForeignKey(blank=True, help_text="Select the contact tax group to use for this contact. Tax groups can be used to customize the tax rules the that apply to any of this contact's orders. Tax groups are defined in `Customer Tax Groups` and can be applied to tax rules in `Tax Rules`.", null=True, on_delete=django.db.models.deletion.PROTECT, to='shuup.CustomerTaxGroup', verbose_name='tax group'),
),
migrations.AlterField(
model_name='contactgrouptranslation',
name='name',
field=models.CharField(help_text='The contact group name. Contact groups can be used to target sales and campaigns to a specific set of users.', max_length=256, verbose_name='name'),
),
migrations.AlterField(
model_name='custompaymentprocessor',
name='rounding_mode',
field=enumfields.fields.EnumField(default='ROUND_HALF_UP', enum=shuup.core.models._service_payment.RoundingMode, help_text='Choose rounding mode for cash payment.', max_length=50, verbose_name='rounding mode'),
),
migrations.AlterField(
model_name='custompaymentprocessor',
name='rounding_quantize',
field=models.DecimalField(decimal_places=9, default=Decimal('0.05'), help_text='Choose rounding quantize (precision) for cash payment.', max_digits=36, verbose_name='rounding quantize'),
),
migrations.AlterField(
model_name='displayunit',
name='allow_bare_number',
field=models.BooleanField(default=False, help_text="If true, values of this unit can occasionally be shown without the symbol attached to it. E.g. if the unit is a `piece`, then you might want for the product listings to only show '$5.95' rather than '$5.95 per pc.'.", verbose_name='allow bare number'),
),
migrations.AlterField(
model_name='displayunit',
name='comparison_value',
field=shuup.core.fields.QuantityField(decimal_places=9, default=1, help_text='Value to use when displaying unit prices. E.g. if the display unit is a gram and the comparison value is 100, then unit prices are shown per 100g, like: $2.95 per 100g.', max_digits=36, validators=[shuup.core.models._units.validate_positive_not_zero], verbose_name='comparison value'),
),
migrations.AlterField(
model_name='displayunit',
name='decimals',
field=models.PositiveSmallIntegerField(default=0, help_text='The number of decimal places to use for values in the display unit. The internal values are still rounded based on the settings of the internal unit.', verbose_name='decimal places'),
),
migrations.AlterField(
model_name='displayunit',
name='ratio',
field=shuup.core.fields.QuantityField(decimal_places=9, default=1, help_text='Size of the display unit in internal unit. E.g. if internal unit is kilogram and display unit is gram, ratio is 0.001.', max_digits=36, validators=[shuup.core.models._units.validate_positive_not_zero], verbose_name='ratio'),
),
migrations.AlterField(
model_name='displayunittranslation',
name='name',
field=models.CharField(help_text='Name of the display unit, e.g. grams.', max_length=150, verbose_name='name'),
),
migrations.AlterField(
model_name='immutableaddress',
name='tax_number',
field=models.CharField(blank=True, help_text='The business tax number. For example, EIN in the USA or VAT code in the EU.', max_length=64, verbose_name='tax number'),
),
migrations.AlterField(
model_name='manufacturer',
name='name',
field=models.CharField(help_text='Enter the manufacturer’s name. Products can be filtered by the manufacturer and this can be useful for inventory and stock management.', max_length=128, verbose_name='name'),
),
migrations.AlterField(
model_name='mutableaddress',
name='tax_number',
field=models.CharField(blank=True, help_text='The business tax number. For example, EIN in the USA or VAT code in the EU.', max_length=64, verbose_name='tax number'),
),
migrations.AlterField(
model_name='orderlinetax',
name='base_amount_value',
field=shuup.core.fields.MoneyValueField(decimal_places=9, help_text='Amount that this tax is calculated from.', max_digits=36, verbose_name='base amount'),
),
migrations.AlterField(
model_name='orderstatus',
name='default',
field=models.BooleanField(db_index=True, default=False, help_text='Defines if the status should be considered as default. Default is always processed first.', verbose_name='default'),
),
migrations.AlterField(
model_name='orderstatus',
name='is_active',
field=models.BooleanField(db_index=True, default=True, help_text='Defines if the status is usable.', verbose_name='is active'),
),
migrations.AlterField(
model_name='orderstatus',
name='role',
field=enumfields.fields.EnumIntegerField(db_index=True, default=0, enum=shuup.core.models.OrderStatusRole, help_text='The role of this status. One role can have multiple order statuses.', verbose_name='role'),
),
migrations.AlterField(
model_name='orderstatustranslation',
name='name',
field=models.CharField(help_text='Name of the order status.', max_length=64, verbose_name='name'),
),
migrations.AlterField(
model_name='orderstatustranslation',
name='public_name',
field=models.CharField(help_text='The name shown to the customers in shop front.', max_length=64, verbose_name='public name'),
),
migrations.AlterField(
model_name='paymentmethod',
name='enabled',
field=models.BooleanField(default=False, help_text='Enable this if this service should be selectable on checkout.', verbose_name='enabled'),
),
migrations.AlterField(
model_name='paymentmethod',
name='tax_class',
field=models.ForeignKey(help_text='The tax class to use for this service. Define by searching for `Tax Classes`.', on_delete=django.db.models.deletion.PROTECT, to='shuup.TaxClass', verbose_name='tax class'),
),
migrations.AlterField(
model_name='paymentmethodtranslation',
name='description',
field=models.CharField(blank=True, help_text='The description of the payment method. This description is shown to the customers on checkout.', max_length=500, verbose_name='description'),
),
migrations.AlterField(
model_name='paymentmethodtranslation',
name='name',
field=models.CharField(help_text='The payment method name. This name is shown to the customers on checkout.', max_length=100, verbose_name='name'),
),
migrations.AlterField(
model_name='product',
name='depth',
field=shuup.core.fields.MeasurementField(decimal_places=9, default=0, help_text='Set the measured depth or length (in millimeters) of your product or product packaging. This will provide customers with the product size and help with calculating shipping costs.', max_digits=36, unit='mm', verbose_name='depth (mm)'),
),
migrations.AlterField(
model_name='product',
name='gross_weight',
field=shuup.core.fields.MeasurementField(decimal_places=9, default=0, help_text='Set the measured gross weight (in grams) of your product WITH its packaging. This will help with calculating shipping costs.', max_digits=36, unit='g', verbose_name='gross weight (g)'),
),
migrations.AlterField(
model_name='product',
name='height',
field=shuup.core.fields.MeasurementField(decimal_places=9, default=0, help_text='Set the measured height (in millimeters) of your product or product packaging. This will provide customers with the product size and help with calculating shipping costs.', max_digits=36, unit='mm', verbose_name='height (mm)'),
),
migrations.AlterField(
model_name='product',
name='manufacturer',
field=models.ForeignKey(blank=True, help_text='Select a manufacturer for your product. To define these, search for `Manufacturers`.', null=True, on_delete=django.db.models.deletion.PROTECT, to='shuup.Manufacturer', verbose_name='manufacturer'),
),
migrations.AlterField(
model_name='product',
name='net_weight',
field=shuup.core.fields.MeasurementField(decimal_places=9, default=0, help_text="Set the measured weight (in grams) of your product WITHOUT its packaging. This will provide customers with the actual product's weight.", max_digits=36, unit='g', verbose_name='net weight (g)'),
),
migrations.AlterField(
model_name='product',
name='sales_unit',
field=models.ForeignKey(blank=True, help_text='Select a sales unit for your product. This is shown in your store front and is used to determine whether the product can be purchased using fractional amounts. To change settings search for `Sales Units`.', null=True, on_delete=django.db.models.deletion.PROTECT, to='shuup.SalesUnit', verbose_name='sales unit'),
),
migrations.AlterField(
model_name='product',
name='shipping_mode',
field=enumfields.fields.EnumIntegerField(default=1, enum=shuup.core.models.ShippingMode, help_text='Set to `shipped` if the product requires shipment.', verbose_name='shipping mode'),
),
migrations.AlterField(
model_name='product',
name='sku',
field=models.CharField(db_index=True, help_text="Enter a SKU (Stock Keeping Unit) number for your product. This is a product identification code that helps you track products through your inventory and analyze their movement. People often use the product's barcode number, but you can set up any numerical system you want to keep track of products.", max_length=128, unique=True, verbose_name='SKU'),
),
migrations.AlterField(
model_name='product',
name='tax_class',
field=models.ForeignKey(help_text='Select a tax class for your product. The tax class is used to determine which taxes to apply to your product. Define tax classes by searching for `Tax Classes`. To define the rules by which taxes are applied search for `Tax Rules`.', on_delete=django.db.models.deletion.PROTECT, to='shuup.TaxClass', verbose_name='tax class'),
),
migrations.AlterField(
model_name='product',
name='width',
field=shuup.core.fields.MeasurementField(decimal_places=9, default=0, help_text='Set the measured width (in millimeters) of your product or product packaging. This will provide customers with the product size and help with calculating shipping costs.', max_digits=36, unit='mm', verbose_name='width (mm)'),
),
migrations.AlterField(
model_name='productmedia',
name='ordering',
field=models.IntegerField(default=0, help_text='You can assign numerical values to images to tell the order in which they shall be displayed on the product page.', verbose_name='ordering'),
),
migrations.AlterField(
model_name='productmedia',
name='public',
field=models.BooleanField(default=True, help_text='Enable this if you want this image be shown on the product page. Enabled by default.', verbose_name='public (shown on product page)'),
),
migrations.AlterField(
model_name='productmedia',
name='purchased',
field=models.BooleanField(default=False, help_text='Enable this if you want the product media to be shown for completed purchases.', verbose_name='purchased (shown for finished purchases)'),
),
migrations.AlterField(
model_name='producttranslation',
name='name',
field=models.CharField(db_index=True, help_text='Enter a descriptive name for your product. This will be its title in your store front.', max_length=256, verbose_name='name'),
),
migrations.AlterField(
model_name='producttranslation',
name='short_description',
field=models.CharField(blank=True, help_text='Enter a short description for your product. The short description will be used to get the attention of your customer with a small, but precise description of your product. It also helps with getting more traffic via search engines.', max_length=150, verbose_name='short description'),
),
migrations.AlterField(
model_name='producttranslation',
name='slug',
field=models.SlugField(blank=True, help_text='Enter a URL slug for your product. Slug is user- and search engine-friendly short text used in a URL to identify and describe a resource. In this case it will determine what your product page URL in the browser address bar will look like. A default will be created using the product name.', max_length=255, null=True, verbose_name='slug'),
),
migrations.AlterField(
model_name='producttranslation',
name='variation_name',
field=models.CharField(blank=True, help_text='You can enter a name for the variation of your product. This could be for example different colors, sizes or versions. To manage variations, at the top of the the individual product page, click `Actions` -> `Manage Variations`.', max_length=128, verbose_name='variation name'),
),
migrations.AlterField(
model_name='producttype',
name='attributes',
field=models.ManyToManyField(blank=True, help_text='Select attributes that go with your product type. To change available attributes search for `Attributes`.', related_name='product_types', to='shuup.Attribute', verbose_name='attributes'),
),
migrations.AlterField(
model_name='salesunit',
name='decimals',
            field=models.PositiveSmallIntegerField(default=0, help_text='The number of decimal places allowed by this sales unit. Set this to a value greater than zero if products with this sales unit can be sold in fractional quantities.', verbose_name='allowed decimal places'),
),
migrations.AlterField(
model_name='salesunittranslation',
name='name',
field=models.CharField(help_text="The sales unit name to use for products (e.g. 'pieces' or 'units'). Sales units can be set individually for each product through the product editor view.", max_length=128, verbose_name='name'),
),
migrations.AlterField(
model_name='salesunittranslation',
name='symbol',
field=models.CharField(help_text='An abbreviated name for this sales unit that is shown throughout admin and order invoices.', max_length=128, verbose_name='unit symbol'),
),
migrations.AlterField(
model_name='serviceprovider',
name='enabled',
field=models.BooleanField(default=True, help_text='Enable this if this service provider can be used when placing orders.', verbose_name='enabled'),
),
migrations.AlterField(
model_name='shippingmethod',
name='enabled',
field=models.BooleanField(default=False, help_text='Enable this if this service should be selectable on checkout.', verbose_name='enabled'),
),
migrations.AlterField(
model_name='shippingmethod',
name='tax_class',
field=models.ForeignKey(help_text='The tax class to use for this service. Define by searching for `Tax Classes`.', on_delete=django.db.models.deletion.PROTECT, to='shuup.TaxClass', verbose_name='tax class'),
),
migrations.AlterField(
model_name='shippingmethodtranslation',
name='name',
field=models.CharField(help_text='The shipping method name. This name is shown to the customers on checkout.', max_length=100, verbose_name='name'),
),
migrations.AlterField(
model_name='shop',
name='currency',
field=shuup.core.fields.CurrencyField(default=shuup.core.models._shops._get_default_currency, help_text='The primary shop currency. This is the currency used when selling the products.', max_length=4, verbose_name='currency'),
),
migrations.AlterField(
model_name='shop',
name='domain',
field=models.CharField(blank=True, help_text='Your shop domain name. Use this field to configure the URL that is used to visit your store front. Note: this requires additional configuration through your internet domain registrar.', max_length=128, null=True, unique=True, verbose_name='domain'),
),
migrations.AlterField(
model_name='shop',
name='favicon',
field=filer.fields.image.FilerImageField(blank=True, help_text="Shop's favicon - a mini-image graphically representing your shop. Depending on the browser, it will be shown next to the address bar and/or on the website title tab.", null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='shop_favicons', to=settings.FILER_IMAGE_MODEL, verbose_name='favicon'),
),
migrations.AlterField(
model_name='shop',
name='logo',
field=filer.fields.image.FilerImageField(blank=True, help_text="Shop's logo. Will be shown at theme.", null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='shop_logos', to=settings.FILER_IMAGE_MODEL, verbose_name='logo'),
),
migrations.AlterField(
model_name='shop',
name='maintenance_mode',
field=models.BooleanField(default=False, help_text="Enable if you want to make your shop temporarily unavailable to visitors while you do shop maintenance or for some other reason. If you don't plan to have this shop open again, change the `Status` on the main `General Information` tab to `Disabled`.", verbose_name='maintenance mode'),
),
migrations.AlterField(
model_name='shop',
name='prices_include_tax',
field=models.BooleanField(default=True, help_text='This option defines whether product prices entered in admin include taxes. Note: this behavior can be overridden with contact group pricing.', verbose_name='prices include tax'),
),
migrations.AlterField(
model_name='shop',
name='status',
field=enumfields.fields.EnumIntegerField(default=0, enum=shuup.core.models.ShopStatus, help_text="Your shop's status. Disable your shop if it's no longer in use. For temporary closing enable the maintenance mode, available in the `Maintenance Mode` tab on the left.", verbose_name='status'),
),
migrations.AlterField(
model_name='shopproduct',
name='available_until',
field=models.DateTimeField(blank=True, help_text='After this date this product will be invisible in store front.', null=True, verbose_name='available until'),
),
migrations.AlterField(
model_name='shopproduct',
name='backorder_maximum',
field=shuup.core.fields.QuantityField(blank=True, decimal_places=9, default=0, help_text='The number of units that can be purchased after the product is already sold out (out of stock). Set to blank for product to be purchasable without limits.', max_digits=36, null=True, verbose_name='backorder maximum'),
),
migrations.AlterField(
model_name='shopproduct',
name='default_price_value',
field=shuup.core.fields.MoneyValueField(blank=True, decimal_places=9, help_text='This is the default individual base unit (or multi-pack) price of the product. All discounts or coupons will be calculated based off of this price.', max_digits=36, null=True, verbose_name='default price'),
),
migrations.AlterField(
model_name='shopproduct',
name='display_unit',
field=models.ForeignKey(blank=True, help_text='Unit for displaying quantities of this product.', null=True, on_delete=django.db.models.deletion.CASCADE, to='shuup.DisplayUnit', verbose_name='display unit'),
),
migrations.AlterField(
model_name='shopproduct',
name='limit_payment_methods',
field=models.BooleanField(default=False, help_text='Enable this if you want to limit your product to use only the select payment methods. You can select the allowed payment method(s) in the field below - all the rest are disallowed.', verbose_name='limit the payment methods'),
),
migrations.AlterField(
model_name='shopproduct',
name='limit_shipping_methods',
            field=models.BooleanField(default=False, help_text='Enable this if you want to limit your product to use only the select shipping methods. You can select the allowed shipping method(s) in the field below - all the rest are disallowed.', verbose_name='limit the shipping methods'),
),
migrations.AlterField(
model_name='shopproduct',
name='minimum_price_value',
field=shuup.core.fields.MoneyValueField(blank=True, decimal_places=9, help_text='This is the default price that the product cannot go under in your store, despite coupons or discounts being applied. This is useful to make sure your product price stays above the cost.', max_digits=36, null=True, verbose_name='minimum price'),
),
migrations.AlterField(
model_name='shopproduct',
name='minimum_purchase_quantity',
field=shuup.core.fields.QuantityField(decimal_places=9, default=1, help_text='Set a minimum number of products needed to be ordered for the purchase. This is useful for setting bulk orders and B2B purchases.', max_digits=36, verbose_name='minimum purchase quantity'),
),
migrations.AlterField(
model_name='shopproduct',
name='payment_methods',
field=models.ManyToManyField(blank=True, help_text='If you enabled the `Limit the payment methods` choice above, then here you can select the individuals payment methods you want to ALLOW for this product. The ones not mentioned are disabled. To change this, search for `Payment Methods`.', related_name='payment_products', to='shuup.PaymentMethod', verbose_name='payment methods'),
),
migrations.AlterField(
model_name='shopproduct',
name='primary_category',
field=models.ForeignKey(blank=True, help_text='Choose the primary category for the product. This will be the main category for classification in the system. The product will be found under this category in your store. To change this, search for `Categories`.', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='primary_shop_products', to='shuup.Category', verbose_name='primary category'),
),
migrations.AlterField(
model_name='shopproduct',
name='purchase_multiple',
field=shuup.core.fields.QuantityField(decimal_places=9, default=0, help_text='Set this to other than 0 if the product needs to be purchased in multiples. For example, if the purchase multiple is set to 2, then customers are required to order the product in multiples of 2. Not to be confused with the Minimum Purchase Quantity.', max_digits=36, verbose_name='purchase multiple'),
),
migrations.AlterField(
model_name='shopproduct',
name='shipping_methods',
            field=models.ManyToManyField(blank=True, help_text='If you enabled the `Limit the shipping methods` choice above, then here you can select the individual shipping methods you want to ALLOW for this product. The ones not mentioned are disabled. To change this, search for `Shipping Methods`.', related_name='shipping_products', to='shuup.ShippingMethod', verbose_name='shipping methods'),
),
migrations.AlterField(
model_name='shopproduct',
name='shop_primary_image',
field=models.ForeignKey(blank=True, help_text='Click this to set this image as the primary display image for the product.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='primary_image_for_shop_products', to='shuup.ProductMedia', verbose_name='primary image'),
),
migrations.AlterField(
model_name='shopproduct',
name='suppliers',
field=models.ManyToManyField(blank=True, help_text='List your suppliers here. Suppliers can be found by searching for `Suppliers`.', related_name='shop_products', to='shuup.Supplier', verbose_name='suppliers'),
),
migrations.AlterField(
model_name='shopproduct',
name='visibility',
field=enumfields.fields.EnumIntegerField(db_index=True, default=3, enum=shuup.core.models.ShopProductVisibility, help_text='Choose how you want your product to be seen and found by the customers. <p>Not visible: Product will not be shown in your store front nor found in search.</p><p>Searchable: Product will be shown in search, but not listed on any category page.</p><p>Listed: Product will be shown on category pages, but not shown in search results.</p><p>Always Visible: Product will be shown in your store front and found in search.</p>', verbose_name='visibility'),
),
migrations.AlterField(
model_name='shopproduct',
name='visibility_groups',
field=models.ManyToManyField(blank=True, help_text='Select the groups you want to make your product visible for. These groups are defined in Contacts Settings - Contact Groups.', related_name='visible_products', to='shuup.ContactGroup', verbose_name='visible for groups'),
),
migrations.AlterField(
model_name='shopproduct',
name='visibility_limit',
field=enumfields.fields.EnumIntegerField(db_index=True, default=1, enum=shuup.core.models.ProductVisibility, help_text='Select whether you want your product to have special limitations on its visibility in your store. You can make products visible to all, visible to only logged-in users, or visible only to certain customer groups.', verbose_name='visibility limitations'),
),
migrations.AlterField(
model_name='shopproducttranslation',
name='description',
field=models.TextField(blank=True, help_text='To make your product stands out, give it an awesome description. This is what will help your shoppers learn about your products. It will also help shoppers find them in the store and on the web.', null=True, verbose_name='description'),
),
migrations.AlterField(
model_name='shopproducttranslation',
name='name',
field=models.CharField(blank=True, help_text='Enter a descriptive name for your product. This will be its title in your store front.', max_length=256, null=True, verbose_name='name'),
),
migrations.AlterField(
model_name='shopproducttranslation',
name='short_description',
field=models.CharField(blank=True, help_text='Enter a short description for your product. The short description will be used to get the attention of your customer with a small, but precise description of your product. It also helps with getting more traffic via search engines.', max_length=150, null=True, verbose_name='short description'),
),
migrations.AlterField(
model_name='shoptranslation',
name='maintenance_message',
field=models.CharField(blank=True, help_text='The message to display to customers while your shop is in a maintenance mode.', max_length=300, verbose_name='maintenance message'),
),
migrations.AlterField(
model_name='shoptranslation',
name='name',
field=models.CharField(help_text='The shop name. This name is displayed throughout Admin Panel.', max_length=64, verbose_name='name'),
),
migrations.AlterField(
model_name='shoptranslation',
name='short_description',
field=models.CharField(blank=True, help_text='Enter a short description for your shop. The short description will be used to get the attention of your customer with a small, but precise description of your shop. It also helps with getting more traffic via search engines.', max_length=150, verbose_name='short description'),
),
migrations.AlterField(
model_name='supplier',
name='enabled',
field=models.BooleanField(default=True, help_text='Indicates whether this supplier is currently enabled. In order to participate fully, the supplier also needs to be `Approved`.', verbose_name='enabled'),
),
migrations.AlterField(
model_name='supplier',
name='is_approved',
field=models.BooleanField(default=True, help_text='Indicates whether this supplier is currently approved for work. In order to participate fully, the supplier also needs to be `Enabled`.', verbose_name='approved'),
),
migrations.AlterField(
model_name='supplier',
name='name',
field=models.CharField(db_index=True, help_text="The product supplier's name. You can enable suppliers to manage the inventory of stocked products.", max_length=128, verbose_name='name'),
),
migrations.AlterField(
model_name='supplier',
name='shops',
            field=models.ManyToManyField(blank=True, help_text='You can select which particular store fronts the supplier should be available in.', related_name='suppliers', to='shuup.Shop', verbose_name='shops'),
),
migrations.AlterField(
model_name='supplier',
name='slug',
field=models.SlugField(blank=True, help_text='Enter a URL slug for your supplier. Slug is user- and search engine-friendly short text used in a URL to identify and describe a resource. In this case it will determine what your supplier page URL in the browser address bar will look like. A default will be created using the supplier name.', max_length=255, null=True, verbose_name='slug'),
),
migrations.AlterField(
model_name='supplier',
name='stock_managed',
            field=models.BooleanField(default=False, help_text='Enable this if this supplier will manage the inventory of the stocked products. Having a managed stock enabled is unnecessary if e.g. selling digital products that will never run out no matter how many are being sold. There are some other cases when it could be an unnecessary complication. This setting merely assigns a sensible default behavior, which can be overwritten on a product-by-product basis.', verbose_name='stock managed'),
),
migrations.AlterField(
model_name='supplier',
name='type',
field=enumfields.fields.EnumIntegerField(default=1, enum=shuup.core.models.SupplierType, help_text='The supplier type indicates whether the products are supplied through an internal supplier or an external supplier, and which group this supplier belongs to.', verbose_name='supplier type'),
),
migrations.AlterField(
model_name='tax',
name='amount_value',
field=shuup.core.fields.MoneyValueField(blank=True, decimal_places=9, default=None, help_text='The flat amount of the tax. Mutually exclusive with percentage rates tax.', max_digits=36, null=True, verbose_name='tax amount value'),
),
migrations.AlterField(
model_name='tax',
name='currency',
field=shuup.core.fields.CurrencyField(blank=True, default=None, max_length=4, null=True, verbose_name='currency of the amount tax'),
),
migrations.AlterField(
model_name='tax',
name='enabled',
field=models.BooleanField(default=True, help_text='Enable if this tax is valid and should be active.', verbose_name='enabled'),
),
migrations.AlterField(
model_name='tax',
name='rate',
field=models.DecimalField(blank=True, decimal_places=5, help_text='The percentage rate of the tax. Mutually exclusive with the flat amount tax (flat tax is rarely used and the option is therefore hidden by default; contact Shuup to enable).', max_digits=6, null=True, verbose_name='tax rate'),
),
migrations.AlterField(
model_name='taxclass',
name='enabled',
field=models.BooleanField(default=True, help_text='Enable if this tax class is valid and should be active.', verbose_name='enabled'),
),
migrations.AlterField(
model_name='taxclasstranslation',
name='name',
field=models.CharField(help_text='The tax class name. Tax classes are used to control how taxes are applied to the products.', max_length=100, verbose_name='name'),
),
migrations.AlterField(
model_name='taxtranslation',
name='name',
field=models.CharField(help_text='The name of the tax. It is shown in order lines, in order invoices and confirmations.', max_length=124, verbose_name='name'),
),
migrations.AlterField(
model_name='waivingcostbehaviorcomponent',
name='waive_limit_value',
field=shuup.core.fields.MoneyValueField(decimal_places=9, help_text='The total price of products limit, at which this service cost is waived.', max_digits=36),
),
migrations.AlterField(
model_name='weightbasedpricerange',
name='max_value',
field=shuup.core.fields.MeasurementField(blank=True, decimal_places=9, default=0, help_text='The maximum weight (grams) before this price no longer applies.', max_digits=36, null=True, unit='g', verbose_name='max weight (g)'),
),
migrations.AlterField(
model_name='weightbasedpricerange',
name='min_value',
field=shuup.core.fields.MeasurementField(blank=True, decimal_places=9, default=0, help_text='The minimum weight (grams) for this price to apply.', max_digits=36, null=True, unit='g', verbose_name='min weight (g)'),
),
migrations.AlterField(
model_name='shop',
name='maintenance_mode',
field=models.BooleanField(default=False, help_text="Enable if you want to make your shop temporarily unavailable to visitors while you do regular shop maintenance, fight the security breach or for some other reason. If you don't plan to have this shop open again, change the `Status` on the main `General Information` tab to `Disabled`.", verbose_name='maintenance mode'),
),
]
|
shoopio/shoop
|
shuup/core/migrations/0068_help_text_improvements.py
|
Python
|
agpl-3.0
| 40,311
|
[
"VisIt"
] |
8e4d6aa0fcd83b9ace7b8c6d85253feab03b3b654de86a7b772189916ed03837
|
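The shuup migration above is a long run of `migrations.AlterField` operations that only touch `help_text` and `verbose_name`. As a minimal, hedged sketch of that pattern (the `catalog` app, the `Product` model and the `0007_initial` dependency are assumptions for illustration and not part of shuup), a help-text-only migration looks like this:

# Minimal sketch of a help-text-only migration. App label, model name and
# dependency are made up; only help_text/verbose_name change, so nothing
# actually changes at the database schema level.
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('catalog', '0007_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='name',
            field=models.CharField(max_length=256, help_text='Enter a descriptive name for the product.', verbose_name='name'),
        ),
    ]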
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'kmu'
"""
Test to read netcdf files from thredds.met.no
"""
#from matplotlib import pyplot as plt
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import netCDF4
# Access netcdf file via OpenDAP
filename = 'http://thredds.met.no/thredds/dodsC/arome25/arome_norway_default2_5km_20140428_06.nc'
vname = 'air_temperature_2m'
nc = netCDF4.Dataset(filename)
h = nc.variables[vname]
x = nc.variables['x']
y = nc.variables['y']
altitude = nc.variables['altitude'][:, :] # retrieve model topography
bkgmap = nc.variables['land_area_fraction'][:, :]
times = nc.variables['time']
jd = netCDF4.num2date(times[:], times.units)
#a = h[0, :, :]
# Extract required area
a = np.ones(bkgmap.shape) * np.nan
fracy1 = 320
fracy2 = 390
fracx1 = 180
fracx2 = 250
a[fracy1:fracy2, fracx1:fracx2] = h[0, fracy1:fracy2, fracx1:fracx2]
print a.shape, type(a)
# Filter by elevation(band)
za = np.ma.masked_outside(altitude, 1000, 1500)
a[za.mask == True] = np.nan
T0 = 273.15
# Do statistics
print "Mean: {0}".format(np.nanmean(a))
print "Standard deviation: {0}".format(np.nanstd(a, dtype=np.float64))
print "Variance: {0}".format(np.nanvar(a))
print "Average: {0}".format(np.average(a))
print "Min: {0}".format(np.nanmin(a))
print "Max: {0}".format(np.nanmax(a))
'''
Can use 'altitude' to filter out alpine regions or elevation bands
'''
# View map and data
fig = plt.figure(figsize=(10, 12))
ax = fig.add_subplot(111)
xyext = [x[0], x[-1], y[0], y[-1]]
plt.imshow(bkgmap, zorder=0, origin='lower', cmap='pink', extent=xyext)
plt.imshow(a, alpha=0.8, zorder=1, origin='lower', cmap='seismic', extent=xyext)
#plt.legend()
plt.savefig(r'/home/karsten/test/ta_exa.png', dpi=90)
|
kmunve/TSanalysis
|
Test/thredds_test.py
|
Python
|
mit
| 1,766
|
[
"NetCDF"
] |
bc5d5d4062ba292c9b59fec7a911e4417602b887f8b9b302e37d16244952d1d5
|
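The thredds script above reads a 2 m temperature field over OpenDAP and then keeps only the grid cells inside a 1000-1500 m elevation band by masking against the model topography. The snippet below isolates just that masking step on synthetic arrays so it runs without network access; the array names and sizes are made up for the example:

# Sketch of the elevation-band filter used above, on synthetic data instead
# of the AROME OpenDAP dataset so it runs offline. Shapes are illustrative.
import numpy as np

ny, nx = 480, 737
altitude = np.random.uniform(0.0, 2500.0, size=(ny, nx))        # stand-in topography
temperature = 273.15 + np.random.normal(0.0, 5.0, size=(ny, nx))

# Keep only grid cells between 1000 m and 1500 m, as in the script above.
band = np.ma.masked_outside(altitude, 1000.0, 1500.0)
selected = np.where(band.mask, np.nan, temperature)

print("cells in band: %d" % np.sum(~band.mask))
print("mean T in band: %.2f K" % np.nanmean(selected))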
# Author(s): TJ Lane (tjlane@stanford.edu) and Christian Schwantes
# (schwancr@stanford.edu)
# Contributors: Vince Voelz, Kyle Beauchamp, Robert McGibbon
# Copyright (c) 2014, Stanford University
# All rights reserved.
"""
Functions for performing mean first passage time
calculations for an MSM.
For a useful introduction to Markov Chains (both ergodic
and absorbing) check out Chapter 11 in:
.. [1] Grinstead, C. M. and Snell, J. L. Introduction to
Probability. American Mathematical Soc., 1998.
The absorbing Markov Chain information is interesting, but
note that we are using ergodic Markov Chains.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import scipy
from mdtraj.utils.six.moves import xrange
import copy
__all__ = ['mfpts']
def mfpts(msm, sinks=None, lag_time=1.):
"""
Gets the Mean First Passage Time (MFPT) for all states to a *set*
of sinks.
Parameters
----------
msm : msmbuilder.MarkovStateModel
MSM fit to the data.
sinks : array_like, int, optional
Indices of the sink states. There are two use-cases:
- None [default] : All MFPTs will be calculated, and the
result is a matrix of the MFPT from state i to state j.
This uses the fundamental matrix formalism.
- list of ints or int : Only the MFPTs into these sink
states will be computed. The result is a vector, with
entry i corresponding to the average time it takes to
first get to *any* sink state from state i
lag_time : float, optional
Lag time for the model. The MFPT will be reported in whatever
units are given here. Default is (1) which is in units of the
lag time of the MSM.
Returns
-------
mfpts : np.ndarray, float
MFPT in time units of lag_time, which depends on the input
value of sinks:
- If sinks is None, then mfpts's shape is (n_states, n_states).
Where mfpts[i, j] is the mean first passage time to state j
from state i.
- If sinks contains one or more states, then mfpts's shape
is (n_states,). Where mfpts[i] is the mean first passage
time from state i to any state in sinks.
References
----------
.. [1] Grinstead, C. M. and Snell, J. L. Introduction to
Probability. American Mathematical Soc., 1998.
As of November 2014, this chapter was available for free online:
http://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf
"""
if hasattr(msm, 'all_transmats_'):
mfpts = np.zeros_like(msm.all_transmats_)
for i, el in enumerate(zip(msm.all_transmats_, msm.all_populations_)):
tprob = el[0]
populations = el[1]
mfpts[i, :, :] = _mfpts(tprob, populations, sinks, lag_time)
return np.median(mfpts, axis=0)
return _mfpts(msm.transmat_, msm.populations_, sinks, lag_time)
def _mfpts(tprob, populations, sinks, lag_time):
"""
Gets the Mean First Passage Time (MFPT) for all states to a *set*
of sinks.
Parameters
----------
tprob : np.ndarray
Transition matrix
populations : np.ndarray, (n_states,)
MSM populations
sinks : array_like, int, optional
Indices of the sink states. There are two use-cases:
- None [default] : All MFPTs will be calculated, and the
result is a matrix of the MFPT from state i to state j.
This uses the fundamental matrix formalism.
- list of ints or int : Only the MFPTs into these sink
states will be computed. The result is a vector, with
entry i corresponding to the average time it takes to
first get to *any* sink state from state i
lag_time : float, optional
Lag time for the model. The MFPT will be reported in whatever
units are given here. Default is (1) which is in units of the
lag time of the MSM.
Returns
-------
mfpts : np.ndarray, float
MFPT in time units of lag_time, which depends on the input
value of sinks:
- If sinks is None, then mfpts's shape is (n_states, n_states).
Where mfpts[i, j] is the mean first passage time to state j
from state i.
- If sinks contains one or more states, then mfpts's shape
is (n_states,). Where mfpts[i] is the mean first passage
time from state i to any state in sinks.
References
----------
.. [1] Grinstead, C. M. and Snell, J. L. Introduction to
Probability. American Mathematical Soc., 1998.
As of November 2014, this chapter was available for free online:
http://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf
"""
n_states = np.shape(populations)[0]
if sinks is None:
# Use Thm 11.16 in [1]
limiting_matrix = np.vstack([populations] * n_states)
# Fundamental matrix
fund_matrix = scipy.linalg.inv(np.eye(n_states) - tprob +
limiting_matrix)
# mfpt[i,j] = (fund_matrix[j,j] - fund_matrix[i,j]) / populations[j]
mfpts = fund_matrix * -1
for j in xrange(n_states):
mfpts[:, j] += fund_matrix[j, j]
mfpts[:, j] /= populations[j]
mfpts *= lag_time
else:
# See section 11.5, and use Thm 11.5
# Turn our ergodic MSM into an absorbing one (all sink
# states are absorbing). Then calculate the mean time
# to absorption.
# Note: we are slightly modifying the description in
# 11.5 so that we also get the mfpts[sink] = 0.0
sinks = np.array(sinks, dtype=int).reshape((-1,))
absorb_tprob = copy.copy(tprob)
for state in sinks:
absorb_tprob[state, :] = 0.0
absorb_tprob[state, state] = 2.0
# note it has to be 2 because we subtract
# the identity below.
lhs = np.eye(n_states) - absorb_tprob
rhs = np.ones(n_states)
for state in sinks:
rhs[state] = 0.0
mfpts = lag_time * np.linalg.solve(lhs, rhs)
return mfpts
|
dr-nate/msmbuilder
|
msmbuilder/tpt/mfpt.py
|
Python
|
lgpl-2.1
| 6,342
|
[
"MDTraj"
] |
37355234b729e33977f4a6b4fcebf07136b77f9c7311472423098582550ea11e
|
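The docstrings above describe the fundamental-matrix route (Thm 11.16 in Grinstead & Snell) without a worked number. Below is a small self-contained check of that formula on a two-state chain where the answer is easy to derive by hand; it reimplements the sinks-is-None branch with plain numpy rather than calling msmbuilder's `_mfpts`:

# Toy verification of the fundamental-matrix MFPT formula used above, on a
# two-state ergodic chain. Plain numpy; not a call into msmbuilder itself.
import numpy as np

tprob = np.array([[0.9, 0.1],
                  [0.5, 0.5]])

# Stationary distribution: left eigenvector of tprob for eigenvalue 1.
evals, evecs = np.linalg.eig(tprob.T)
pi = np.real(evecs[:, np.argmin(np.abs(evals - 1.0))])
pi /= pi.sum()                                     # [5/6, 1/6]

n = tprob.shape[0]
limiting = np.vstack([pi] * n)                     # every row is the stationary distribution
fund = np.linalg.inv(np.eye(n) - tprob + limiting)

mfpt = np.empty((n, n))
for j in range(n):
    mfpt[:, j] = (fund[j, j] - fund[:, j]) / pi[j]

print(mfpt)
# Hand check: m(0->1) solves m = 1 + 0.9*m  =>  m = 10
#             m(1->0) solves m = 1 + 0.5*m  =>  m = 2
# so mfpt is approximately [[0., 10.], [2., 0.]]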
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from node import NodeVisitor, ValueNode, BinaryExpressionNode
from parser import precedence
def escape(string, extras=""):
rv = string.encode("utf8").encode("string_escape")
for extra in extras:
rv = rv.replace(extra, "\\" + extra)
return rv
class ManifestSerializer(NodeVisitor):
def __init__(self, skip_empty_data=False):
self.skip_empty_data = skip_empty_data
def serialize(self, root):
self.indent = 2
rv = "\n".join(self.visit(root))
if rv[-1] != "\n":
rv = rv + "\n"
return rv
def visit_DataNode(self, node):
rv = []
if not self.skip_empty_data or node.children:
if node.data:
rv.append("[%s]" % escape(node.data, extras="]"))
indent = self.indent * " "
else:
indent = ""
for child in node.children:
rv.extend("%s%s" % (indent if item else "", item) for item in self.visit(child))
if node.parent:
rv.append("")
return rv
def visit_KeyValueNode(self, node):
rv = [node.data + ":"]
indent = " " * self.indent
if len(node.children) == 1 and isinstance(node.children[0], ValueNode):
rv[0] += " %s" % escape(self.visit(node.children[0])[0])
else:
for child in node.children:
rv.append(indent + self.visit(child)[0])
return rv
def visit_ValueNode(self, node):
return [escape(node.data)]
def visit_ConditionalNode(self, node):
return ["if %s: %s" % tuple(self.visit(item)[0] for item in node.children)]
def visit_StringNode(self, node):
rv = ["\"%s\"" % node.data]
for child in node.children:
rv[0] += self.visit(child)[0]
return rv
def visit_NumberNode(self, node):
return [node.data]
def visit_VariableNode(self, node):
rv = node.data
for child in node.children:
rv += self.visit(child)
return [rv]
def visit_IndexNode(self, node):
assert len(node.children) == 1
return ["[%s]" % self.visit(node.children[0])[0]]
def visit_UnaryExpressionNode(self, node):
children = []
for child in node.children:
child_str = self.visit(child)[0]
if isinstance(child, BinaryExpressionNode):
child_str = "(%s)" % child_str
children.append(child_str)
return [" ".join(children)]
def visit_BinaryExpressionNode(self, node):
assert len(node.children) == 3
children = []
for child_index in [1, 0, 2]:
child = node.children[child_index]
child_str = self.visit(child)[0]
if (isinstance(child, BinaryExpressionNode) and
precedence(node.children[0]) < precedence(child.children[0])):
child_str = "(%s)" % child_str
children.append(child_str)
return [" ".join(children)]
def visit_UnaryOperatorNode(self, node):
return [node.data]
def visit_BinaryOperatorNode(self, node):
return [node.data]
def serialize(tree, *args, **kwargs):
s = ManifestSerializer(*args, **kwargs)
return s.serialize(tree)
|
indykish/servo
|
tests/wpt/harness/wptrunner/wptmanifest/serializer.py
|
Python
|
mpl-2.0
| 3,478
|
[
"VisIt"
] |
9cc539d691c923b15d58fc37c6c626411f26b9a4a4293624cc3d50ddf940ce27
|
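The serializer above inherits from `NodeVisitor` in the sibling `node` module, which is not shown here; the key mechanism is dispatch to `visit_<ClassName>` methods. The sketch below is a stand-alone illustration of that dispatch pattern, written under the assumption that `NodeVisitor` works roughly this way; it is not a copy of the wptmanifest implementation:

# Stand-alone sketch of the visit_<ClassName> dispatch the serializer relies
# on. The real NodeVisitor lives in the (unshown) node module; this is an
# assumption about its shape for illustration only.
class Node(object):
    def __init__(self, data, children=None):
        self.data = data
        self.children = children or []


class ValueNode(Node):
    pass


class KeyValueNode(Node):
    pass


class NodeVisitor(object):
    def visit(self, node):
        # Look up visit_ValueNode, visit_KeyValueNode, ... by class name.
        method = getattr(self, "visit_%s" % node.__class__.__name__)
        return method(node)


class FlatSerializer(NodeVisitor):
    def visit_KeyValueNode(self, node):
        values = ", ".join(self.visit(child) for child in node.children)
        return "%s: %s" % (node.data, values)

    def visit_ValueNode(self, node):
        return str(node.data)


tree = KeyValueNode("expected", [ValueNode("FAIL"), ValueNode("TIMEOUT")])
print(FlatSerializer().visit(tree))   # expected: FAIL, TIMEOUT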
# coding: utf-8
from __future__ import unicode_literals
import itertools
import re
import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import (
clean_html,
dict_get,
ExtractorError,
float_or_none,
get_element_by_class,
int_or_none,
js_to_json,
parse_duration,
parse_iso8601,
try_get,
unescapeHTML,
url_or_none,
urlencode_postdata,
urljoin,
)
from ..compat import (
compat_HTTPError,
compat_urlparse,
)
class BBCCoUkIE(InfoExtractor):
IE_NAME = 'bbc.co.uk'
IE_DESC = 'BBC iPlayer'
_ID_REGEX = r'(?:[pbm][\da-z]{7}|w[\da-z]{7,14})'
_VALID_URL = r'''(?x)
https?://
(?:www\.)?bbc\.co\.uk/
(?:
programmes/(?!articles/)|
iplayer(?:/[^/]+)?/(?:episode/|playlist/)|
music/(?:clips|audiovideo/popular)[/#]|
radio/player/|
events/[^/]+/play/[^/]+/
)
(?P<id>%s)(?!/(?:episodes|broadcasts|clips))
''' % _ID_REGEX
_LOGIN_URL = 'https://account.bbc.com/signin'
_NETRC_MACHINE = 'bbc'
_MEDIASELECTOR_URLS = [
# Provides HQ HLS streams with even better quality that pc mediaset but fails
# with geolocation in some cases when it's even not geo restricted at all (e.g.
# http://www.bbc.co.uk/programmes/b06bp7lf). Also may fail with selectionunavailable.
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s',
]
_MEDIASELECTION_NS = 'http://bbc.co.uk/2008/mp/mediaselection'
_EMP_PLAYLIST_NS = 'http://bbc.co.uk/2008/emp/playlist'
_NAMESPACES = (
_MEDIASELECTION_NS,
_EMP_PLAYLIST_NS,
)
_TESTS = [
{
'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
'info_dict': {
'id': 'b039d07m',
'ext': 'flv',
'title': 'Leonard Cohen, Kaleidoscope - BBC Radio 4',
'description': 'The Canadian poet and songwriter reflects on his musical career.',
},
'params': {
# rtmp download
'skip_download': True,
}
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
'info_dict': {
'id': 'b00yng1d',
'ext': 'flv',
'title': 'The Man in Black: Series 3: The Printed Name',
'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
'duration': 1800,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Episode is no longer available on BBC iPlayer Radio',
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
'info_dict': {
'id': 'b00yng1d',
'ext': 'flv',
'title': 'The Voice UK: Series 3: Blind Auditions 5',
'description': 'Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.',
'duration': 5100,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
'info_dict': {
'id': 'b03k3pb7',
'ext': 'flv',
'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
'description': '2. Invasion',
'duration': 3600,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
}, {
'url': 'http://www.bbc.co.uk/programmes/b04v20dw',
'info_dict': {
'id': 'b04v209v',
'ext': 'flv',
'title': 'Pete Tong, The Essential New Tune Special',
'description': "Pete has a very special mix - all of 2014's Essential New Tunes!",
'duration': 10800,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Episode is no longer available on BBC iPlayer Radio',
}, {
'url': 'http://www.bbc.co.uk/music/clips/p022h44b',
'note': 'Audio',
'info_dict': {
'id': 'p022h44j',
'ext': 'flv',
'title': 'BBC Proms Music Guides, Rachmaninov: Symphonic Dances',
'description': "In this Proms Music Guide, Andrew McGregor looks at Rachmaninov's Symphonic Dances.",
'duration': 227,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://www.bbc.co.uk/music/clips/p025c0zz',
'note': 'Video',
'info_dict': {
'id': 'p025c103',
'ext': 'flv',
'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)',
'description': 'Rae Morris performs Closer for BBC Three at Reading 2014',
'duration': 226,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls',
'info_dict': {
'id': 'p02n76xf',
'ext': 'flv',
'title': 'Natural World, 2015-2016: 2. Super Powered Owls',
'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d',
'duration': 3540,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'geolocation',
}, {
'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition',
'info_dict': {
'id': 'b05zmgw1',
'ext': 'flv',
'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.',
'title': 'Royal Academy Summer Exhibition',
'duration': 3540,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'geolocation',
}, {
# iptv-all mediaset fails with geolocation however there is no geo restriction
# for this programme at all
'url': 'http://www.bbc.co.uk/programmes/b06rkn85',
'info_dict': {
'id': 'b06rkms3',
'ext': 'flv',
'title': "Best of the Mini-Mixes 2015: Part 3, Annie Mac's Friday Night - BBC Radio 1",
'description': "Annie has part three in the Best of the Mini-Mixes 2015, plus the year's Most Played!",
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Now it\'s really geo-restricted',
}, {
# compact player (https://github.com/rg3/youtube-dl/issues/8147)
'url': 'http://www.bbc.co.uk/programmes/p028bfkf/player',
'info_dict': {
'id': 'p028bfkj',
'ext': 'flv',
'title': 'Extract from BBC documentary Look Stranger - Giant Leeks and Magic Brews',
'description': 'Extract from BBC documentary Look Stranger - Giant Leeks and Magic Brews',
},
'params': {
# rtmp download
'skip_download': True,
},
}, {
'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/music/clips#p02frcc3',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/radio/player/p03cchwf',
'only_matching': True,
}, {
'url': 'https://www.bbc.co.uk/music/audiovideo/popular#p055bc55',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/programmes/w3csv1y9',
'only_matching': True,
}, {
'url': 'https://www.bbc.co.uk/programmes/m00005xn',
'only_matching': True,
}, {
'url': 'https://www.bbc.co.uk/programmes/w172w4dww1jqt5s',
'only_matching': True,
}]
_USP_RE = r'/([^/]+?)\.ism(?:\.hlsv2\.ism)?/[^/]+\.m3u8'
def _login(self):
username, password = self._get_login_info()
if username is None:
return
login_page = self._download_webpage(
self._LOGIN_URL, None, 'Downloading signin page')
login_form = self._hidden_inputs(login_page)
login_form.update({
'username': username,
'password': password,
})
post_url = urljoin(self._LOGIN_URL, self._search_regex(
r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
'post url', default=self._LOGIN_URL, group='url'))
response, urlh = self._download_webpage_handle(
post_url, None, 'Logging in', data=urlencode_postdata(login_form),
headers={'Referer': self._LOGIN_URL})
if self._LOGIN_URL in urlh.geturl():
error = clean_html(get_element_by_class('form-message', response))
if error:
raise ExtractorError(
'Unable to login: %s' % error, expected=True)
raise ExtractorError('Unable to log in')
def _real_initialize(self):
self._login()
class MediaSelectionError(Exception):
def __init__(self, id):
self.id = id
def _extract_asx_playlist(self, connection, programme_id):
asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
return [ref.get('href') for ref in asx.findall('./Entry/ref')]
def _extract_items(self, playlist):
return playlist.findall('./{%s}item' % self._EMP_PLAYLIST_NS)
def _findall_ns(self, element, xpath):
elements = []
for ns in self._NAMESPACES:
elements.extend(element.findall(xpath % ns))
return elements
def _extract_medias(self, media_selection):
error = media_selection.find('./{%s}error' % self._MEDIASELECTION_NS)
if error is None:
            error = media_selection.find('./{%s}error' % self._EMP_PLAYLIST_NS)
if error is not None:
raise BBCCoUkIE.MediaSelectionError(error.get('id'))
return self._findall_ns(media_selection, './{%s}media')
def _extract_connections(self, media):
return self._findall_ns(media, './{%s}connection')
def _get_subtitles(self, media, programme_id):
subtitles = {}
for connection in self._extract_connections(media):
cc_url = url_or_none(connection.get('href'))
if not cc_url:
continue
captions = self._download_xml(
cc_url, programme_id, 'Downloading captions', fatal=False)
if not isinstance(captions, xml.etree.ElementTree.Element):
continue
lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
subtitles[lang] = [
{
'url': connection.get('href'),
'ext': 'ttml',
},
]
return subtitles
def _raise_extractor_error(self, media_selection_error):
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, media_selection_error.id),
expected=True)
def _download_media_selector(self, programme_id):
last_exception = None
for mediaselector_url in self._MEDIASELECTOR_URLS:
try:
return self._download_media_selector_url(
mediaselector_url % programme_id, programme_id)
except BBCCoUkIE.MediaSelectionError as e:
if e.id in ('notukerror', 'geolocation', 'selectionunavailable'):
last_exception = e
continue
self._raise_extractor_error(e)
self._raise_extractor_error(last_exception)
def _download_media_selector_url(self, url, programme_id=None):
media_selection = self._download_xml(
url, programme_id, 'Downloading media selection XML',
expected_status=(403, 404))
return self._process_media_selector(media_selection, programme_id)
def _process_media_selector(self, media_selection, programme_id):
formats = []
subtitles = None
urls = []
for media in self._extract_medias(media_selection):
kind = media.get('kind')
if kind in ('video', 'audio'):
bitrate = int_or_none(media.get('bitrate'))
encoding = media.get('encoding')
service = media.get('service')
width = int_or_none(media.get('width'))
height = int_or_none(media.get('height'))
file_size = int_or_none(media.get('media_file_size'))
for connection in self._extract_connections(media):
href = connection.get('href')
if href in urls:
continue
if href:
urls.append(href)
conn_kind = connection.get('kind')
protocol = connection.get('protocol')
supplier = connection.get('supplier')
transfer_format = connection.get('transferFormat')
format_id = supplier or conn_kind or protocol
if service:
format_id = '%s_%s' % (service, format_id)
# ASX playlist
if supplier == 'asx':
for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
formats.append({
'url': ref,
'format_id': 'ref%s_%s' % (i, format_id),
})
elif transfer_format == 'dash':
formats.extend(self._extract_mpd_formats(
href, programme_id, mpd_id=format_id, fatal=False))
elif transfer_format == 'hls':
formats.extend(self._extract_m3u8_formats(
href, programme_id, ext='mp4', entry_protocol='m3u8_native',
m3u8_id=format_id, fatal=False))
if re.search(self._USP_RE, href):
usp_formats = self._extract_m3u8_formats(
re.sub(self._USP_RE, r'/\1.ism/\1.m3u8', href),
programme_id, ext='mp4', entry_protocol='m3u8_native',
m3u8_id=format_id, fatal=False)
for f in usp_formats:
if f.get('height') and f['height'] > 720:
continue
formats.append(f)
elif transfer_format == 'hds':
formats.extend(self._extract_f4m_formats(
href, programme_id, f4m_id=format_id, fatal=False))
else:
if not service and not supplier and bitrate:
format_id += '-%d' % bitrate
fmt = {
'format_id': format_id,
'filesize': file_size,
}
if kind == 'video':
fmt.update({
'width': width,
'height': height,
'tbr': bitrate,
'vcodec': encoding,
})
else:
fmt.update({
'abr': bitrate,
'acodec': encoding,
'vcodec': 'none',
})
if protocol in ('http', 'https'):
# Direct link
fmt.update({
'url': href,
})
elif protocol == 'rtmp':
application = connection.get('application', 'ondemand')
auth_string = connection.get('authString')
identifier = connection.get('identifier')
server = connection.get('server')
fmt.update({
'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
'play_path': identifier,
'app': '%s?%s' % (application, auth_string),
'page_url': 'http://www.bbc.co.uk',
'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
'rtmp_live': False,
'ext': 'flv',
})
else:
continue
formats.append(fmt)
elif kind == 'captions':
subtitles = self.extract_subtitles(media, programme_id)
return formats, subtitles
def _download_playlist(self, playlist_id):
try:
playlist = self._download_json(
'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
playlist_id, 'Downloading playlist JSON')
version = playlist.get('defaultAvailableVersion')
if version:
smp_config = version['smpConfig']
title = smp_config['title']
description = smp_config['summary']
for item in smp_config['items']:
kind = item['kind']
if kind not in ('programme', 'radioProgramme'):
continue
programme_id = item.get('vpid')
duration = int_or_none(item.get('duration'))
formats, subtitles = self._download_media_selector(programme_id)
return programme_id, title, description, duration, formats, subtitles
except ExtractorError as ee:
if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
raise
# fallback to legacy playlist
return self._process_legacy_playlist(playlist_id)
def _process_legacy_playlist_url(self, url, display_id):
playlist = self._download_legacy_playlist_url(url, display_id)
return self._extract_from_legacy_playlist(playlist, display_id)
def _process_legacy_playlist(self, playlist_id):
return self._process_legacy_playlist_url(
'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id, playlist_id)
def _download_legacy_playlist_url(self, url, playlist_id=None):
return self._download_xml(
url, playlist_id, 'Downloading legacy playlist XML')
def _extract_from_legacy_playlist(self, playlist, playlist_id):
no_items = playlist.find('./{%s}noItems' % self._EMP_PLAYLIST_NS)
if no_items is not None:
reason = no_items.get('reason')
if reason == 'preAvailability':
msg = 'Episode %s is not yet available' % playlist_id
elif reason == 'postAvailability':
msg = 'Episode %s is no longer available' % playlist_id
elif reason == 'noMedia':
msg = 'Episode %s is not currently available' % playlist_id
else:
msg = 'Episode %s is not available: %s' % (playlist_id, reason)
raise ExtractorError(msg, expected=True)
for item in self._extract_items(playlist):
kind = item.get('kind')
if kind not in ('programme', 'radioProgramme'):
continue
title = playlist.find('./{%s}title' % self._EMP_PLAYLIST_NS).text
description_el = playlist.find('./{%s}summary' % self._EMP_PLAYLIST_NS)
description = description_el.text if description_el is not None else None
def get_programme_id(item):
def get_from_attributes(item):
                    for p in ('identifier', 'group'):
value = item.get(p)
if value and re.match(r'^[pb][\da-z]{7}$', value):
return value
                programme_id = get_from_attributes(item)
                if programme_id:
                    return programme_id
mediator = item.find('./{%s}mediator' % self._EMP_PLAYLIST_NS)
if mediator is not None:
return get_from_attributes(mediator)
programme_id = get_programme_id(item)
duration = int_or_none(item.get('duration'))
if programme_id:
formats, subtitles = self._download_media_selector(programme_id)
else:
formats, subtitles = self._process_media_selector(item, playlist_id)
programme_id = playlist_id
return programme_id, title, description, duration, formats, subtitles
def _real_extract(self, url):
group_id = self._match_id(url)
webpage = self._download_webpage(url, group_id, 'Downloading video page')
error = self._search_regex(
r'<div\b[^>]+\bclass=["\']smp__message delta["\'][^>]*>([^<]+)<',
webpage, 'error', default=None)
if error:
raise ExtractorError(error, expected=True)
programme_id = None
duration = None
tviplayer = self._search_regex(
r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
webpage, 'player', default=None)
if tviplayer:
player = self._parse_json(tviplayer, group_id).get('player', {})
duration = int_or_none(player.get('duration'))
programme_id = player.get('vpid')
if not programme_id:
programme_id = self._search_regex(
r'"vpid"\s*:\s*"(%s)"' % self._ID_REGEX, webpage, 'vpid', fatal=False, default=None)
if programme_id:
formats, subtitles = self._download_media_selector(programme_id)
title = self._og_search_title(webpage, default=None) or self._html_search_regex(
(r'<h2[^>]+id="parent-title"[^>]*>(.+?)</h2>',
r'<div[^>]+class="info"[^>]*>\s*<h1>(.+?)</h1>'), webpage, 'title')
description = self._search_regex(
(r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
r'<div[^>]+class="info_+synopsis"[^>]*>([^<]+)</div>'),
webpage, 'description', default=None)
if not description:
description = self._html_search_meta('description', webpage)
else:
programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)
self._sort_formats(formats)
return {
'id': programme_id,
'title': title,
'description': description,
'thumbnail': self._og_search_thumbnail(webpage, default=None),
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
class BBCIE(BBCCoUkIE):
IE_NAME = 'bbc'
IE_DESC = 'BBC'
_VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)'
_MEDIASELECTOR_URLS = [
# Provides HQ HLS streams but fails with geolocation in some cases when it's
# even not geo restricted at all
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
# Provides more formats, namely direct mp4 links, but fails on some videos with
# notukerror for non UK (?) users (e.g.
# http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
'http://open.live.bbc.co.uk/mediaselector/4/mtis/stream/%s',
# Provides fewer formats, but works everywhere for everybody (hopefully)
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/journalism-pc/vpid/%s',
]
_TESTS = [{
# article with multiple videos embedded with data-playable containing vpids
'url': 'http://www.bbc.com/news/world-europe-32668511',
'info_dict': {
'id': 'world-europe-32668511',
'title': 'Russia stages massive WW2 parade despite Western boycott',
'description': 'md5:00ff61976f6081841f759a08bf78cc9c',
},
'playlist_count': 2,
}, {
# article with multiple videos embedded with data-playable (more videos)
'url': 'http://www.bbc.com/news/business-28299555',
'info_dict': {
'id': 'business-28299555',
'title': 'Farnborough Airshow: Video highlights',
'description': 'BBC reports and video highlights at the Farnborough Airshow.',
},
'playlist_count': 9,
'skip': 'Save time',
}, {
# article with multiple videos embedded with `new SMP()`
# broken
'url': 'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460',
'info_dict': {
'id': '3662a707-0af9-3149-963f-47bea720b460',
'title': 'BUGGER',
},
'playlist_count': 18,
}, {
# single video embedded with data-playable containing vpid
'url': 'http://www.bbc.com/news/world-europe-32041533',
'info_dict': {
'id': 'p02mprgb',
'ext': 'mp4',
'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV',
'description': 'md5:2868290467291b37feda7863f7a83f54',
'duration': 47,
'timestamp': 1427219242,
'upload_date': '20150324',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# article with single video embedded with data-playable containing XML playlist
# with direct video links as progressiveDownloadUrl (for now these are extracted)
# and playlist with f4m and m3u8 as streamingUrl
'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu',
'info_dict': {
'id': '150615_telabyad_kentin_cogu',
'ext': 'mp4',
'title': "YPG: Tel Abyad'ın tamamı kontrolümüzde",
'description': 'md5:33a4805a855c9baf7115fcbde57e7025',
'timestamp': 1434397334,
'upload_date': '20150615',
},
'params': {
'skip_download': True,
}
}, {
# single video embedded with data-playable containing XML playlists (regional section)
'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw',
'info_dict': {
'id': '150619_video_honduras_militares_hospitales_corrupcion_aw',
'ext': 'mp4',
'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción',
'description': 'md5:1525f17448c4ee262b64b8f0c9ce66c8',
'timestamp': 1434713142,
'upload_date': '20150619',
},
'params': {
'skip_download': True,
}
}, {
# single video from video playlist embedded with vxp-playlist-data JSON
'url': 'http://www.bbc.com/news/video_and_audio/must_see/33376376',
'info_dict': {
'id': 'p02w6qjc',
'ext': 'mp4',
'title': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
'duration': 56,
'description': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
},
'params': {
'skip_download': True,
}
}, {
# single video story with digitalData
'url': 'http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret',
'info_dict': {
'id': 'p02q6gc4',
'ext': 'flv',
'title': 'Sri Lanka’s spicy secret',
'description': 'As a new train line to Jaffna opens up the country’s north, travellers can experience a truly distinct slice of Tamil culture.',
'timestamp': 1437674293,
'upload_date': '20150723',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# single video story without digitalData
'url': 'http://www.bbc.com/autos/story/20130513-hyundais-rock-star',
'info_dict': {
'id': 'p018zqqg',
'ext': 'mp4',
'title': 'Hyundai Santa Fe Sport: Rock star',
'description': 'md5:b042a26142c4154a6e472933cf20793d',
'timestamp': 1415867444,
'upload_date': '20141113',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# single video embedded with Morph
'url': 'http://www.bbc.co.uk/sport/live/olympics/36895975',
'info_dict': {
'id': 'p041vhd0',
'ext': 'mp4',
'title': "Nigeria v Japan - Men's First Round",
'description': 'Live coverage of the first round from Group B at the Amazonia Arena.',
'duration': 7980,
'uploader': 'BBC Sport',
'uploader_id': 'bbc_sport',
},
'params': {
# m3u8 download
'skip_download': True,
},
'skip': 'Georestricted to UK',
}, {
# single video with playlist.sxml URL in playlist param
'url': 'http://www.bbc.com/sport/0/football/33653409',
'info_dict': {
'id': 'p02xycnp',
'ext': 'mp4',
'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
'description': 'BBC Sport\'s David Ornstein has the latest transfer gossip, including rumours of a Manchester United return for Cristiano Ronaldo.',
'duration': 140,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# article with multiple videos embedded with playlist.sxml in playlist param
'url': 'http://www.bbc.com/sport/0/football/34475836',
'info_dict': {
'id': '34475836',
'title': 'Jurgen Klopp: Furious football from a witty and winning coach',
'description': 'Fast-paced football, wit, wisdom and a ready smile - why Liverpool fans should come to love new boss Jurgen Klopp.',
},
'playlist_count': 3,
}, {
# school report article with single video
'url': 'http://www.bbc.co.uk/schoolreport/35744779',
'info_dict': {
'id': '35744779',
'title': 'School which breaks down barriers in Jerusalem',
},
'playlist_count': 1,
}, {
# single video with playlist URL from weather section
'url': 'http://www.bbc.com/weather/features/33601775',
'only_matching': True,
}, {
# custom redirection to www.bbc.com
'url': 'http://www.bbc.co.uk/news/science-environment-33661876',
'only_matching': True,
}, {
# single video article embedded with data-media-vpid
'url': 'http://www.bbc.co.uk/sport/rowing/35908187',
'only_matching': True,
}, {
'url': 'https://www.bbc.co.uk/bbcthree/clip/73d0bbd0-abc3-4cea-b3c0-cdae21905eb1',
'info_dict': {
'id': 'p06556y7',
'ext': 'mp4',
'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
'description': 'md5:4b7dfd063d5a789a1512e99662be3ddd',
},
'params': {
'skip_download': True,
}
}, {
# window.__PRELOADED_STATE__
'url': 'https://www.bbc.co.uk/radio/play/b0b9z4yl',
'info_dict': {
'id': 'b0b9z4vz',
'ext': 'mp4',
'title': 'Prom 6: An American in Paris and Turangalila',
'description': 'md5:51cf7d6f5c8553f197e58203bc78dff8',
'uploader': 'Radio 3',
'uploader_id': 'bbc_radio_three',
},
}, {
'url': 'http://www.bbc.co.uk/learningenglish/chinese/features/lingohack/ep-181227',
'info_dict': {
'id': 'p06w9tws',
'ext': 'mp4',
'title': 'md5:2fabf12a726603193a2879a055f72514',
'description': 'Learn English words and phrases from this story',
},
'add_ie': [BBCCoUkIE.ie_key()],
}]
@classmethod
def suitable(cls, url):
EXCLUDE_IE = (BBCCoUkIE, BBCCoUkArticleIE, BBCCoUkIPlayerPlaylistIE, BBCCoUkPlaylistIE)
return (False if any(ie.suitable(url) for ie in EXCLUDE_IE)
else super(BBCIE, cls).suitable(url))
def _extract_from_media_meta(self, media_meta, video_id):
# Direct links to media in media metadata (e.g.
# http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
# TODO: there are also f4m and m3u8 streams incorporated in playlist.sxml
source_files = media_meta.get('sourceFiles')
if source_files:
return [{
'url': f['url'],
'format_id': format_id,
'ext': f.get('encoding'),
'tbr': float_or_none(f.get('bitrate'), 1000),
'filesize': int_or_none(f.get('filesize')),
} for format_id, f in source_files.items() if f.get('url')], []
programme_id = media_meta.get('externalId')
if programme_id:
return self._download_media_selector(programme_id)
# Process playlist.sxml as legacy playlist
href = media_meta.get('href')
if href:
playlist = self._download_legacy_playlist_url(href)
_, _, _, _, formats, subtitles = self._extract_from_legacy_playlist(playlist, video_id)
return formats, subtitles
return [], []
def _extract_from_playlist_sxml(self, url, playlist_id, timestamp):
programme_id, title, description, duration, formats, subtitles = \
self._process_legacy_playlist_url(url, playlist_id)
self._sort_formats(formats)
return {
'id': programme_id,
'title': title,
'description': description,
'duration': duration,
'timestamp': timestamp,
'formats': formats,
'subtitles': subtitles,
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
json_ld_info = self._search_json_ld(webpage, playlist_id, default={})
timestamp = json_ld_info.get('timestamp')
playlist_title = json_ld_info.get('title')
if not playlist_title:
playlist_title = self._og_search_title(
webpage, default=None) or self._html_search_regex(
r'<title>(.+?)</title>', webpage, 'playlist title', default=None)
if playlist_title:
playlist_title = re.sub(r'(.+)\s*-\s*BBC.*?$', r'\1', playlist_title).strip()
playlist_description = json_ld_info.get(
'description') or self._og_search_description(webpage, default=None)
if not timestamp:
timestamp = parse_iso8601(self._search_regex(
[r'<meta[^>]+property="article:published_time"[^>]+content="([^"]+)"',
r'itemprop="datePublished"[^>]+datetime="([^"]+)"',
r'"datePublished":\s*"([^"]+)'],
webpage, 'date', default=None))
entries = []
# article with multiple videos embedded with playlist.sxml (e.g.
# http://www.bbc.com/sport/0/football/34475836)
playlists = re.findall(r'<param[^>]+name="playlist"[^>]+value="([^"]+)"', webpage)
playlists.extend(re.findall(r'data-media-id="([^"]+/playlist\.sxml)"', webpage))
if playlists:
entries = [
self._extract_from_playlist_sxml(playlist_url, playlist_id, timestamp)
for playlist_url in playlists]
# news article with multiple videos embedded with data-playable
data_playables = re.findall(r'data-playable=(["\'])({.+?})\1', webpage)
if data_playables:
for _, data_playable_json in data_playables:
data_playable = self._parse_json(
unescapeHTML(data_playable_json), playlist_id, fatal=False)
if not data_playable:
continue
settings = data_playable.get('settings', {})
if settings:
# data-playable with video vpid in settings.playlistObject.items (e.g.
# http://www.bbc.com/news/world-us-canada-34473351)
playlist_object = settings.get('playlistObject', {})
if playlist_object:
items = playlist_object.get('items')
if items and isinstance(items, list):
title = playlist_object['title']
description = playlist_object.get('summary')
duration = int_or_none(items[0].get('duration'))
programme_id = items[0].get('vpid')
formats, subtitles = self._download_media_selector(programme_id)
self._sort_formats(formats)
entries.append({
'id': programme_id,
'title': title,
'description': description,
'timestamp': timestamp,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
})
else:
# data-playable without vpid but with a playlist.sxml URLs
# in otherSettings.playlist (e.g.
# http://www.bbc.com/turkce/multimedya/2015/10/151010_vid_ankara_patlama_ani)
playlist = data_playable.get('otherSettings', {}).get('playlist', {})
if playlist:
entry = None
for key in ('streaming', 'progressiveDownload'):
playlist_url = playlist.get('%sUrl' % key)
if not playlist_url:
continue
try:
info = self._extract_from_playlist_sxml(
playlist_url, playlist_id, timestamp)
if not entry:
entry = info
else:
entry['title'] = info['title']
entry['formats'].extend(info['formats'])
except Exception as e:
# Some playlist URL may fail with 500, at the same time
# the other one may work fine (e.g.
# http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 500:
continue
raise
if entry:
self._sort_formats(entry['formats'])
entries.append(entry)
if entries:
return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
# http://www.bbc.co.uk/learningenglish/chinese/features/lingohack/ep-181227
group_id = self._search_regex(
r'<div[^>]+\bclass=["\']video["\'][^>]+\bdata-pid=["\'](%s)' % self._ID_REGEX,
webpage, 'group id', default=None)
        if group_id:
return self.url_result(
'https://www.bbc.co.uk/programmes/%s' % group_id,
ie=BBCCoUkIE.ie_key())
# single video story (e.g. http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
programme_id = self._search_regex(
[r'data-(?:video-player|media)-vpid="(%s)"' % self._ID_REGEX,
r'<param[^>]+name="externalIdentifier"[^>]+value="(%s)"' % self._ID_REGEX,
r'videoId\s*:\s*["\'](%s)["\']' % self._ID_REGEX],
webpage, 'vpid', default=None)
if programme_id:
formats, subtitles = self._download_media_selector(programme_id)
self._sort_formats(formats)
# digitalData may be missing (e.g. http://www.bbc.com/autos/story/20130513-hyundais-rock-star)
digital_data = self._parse_json(
self._search_regex(
r'var\s+digitalData\s*=\s*({.+?});?\n', webpage, 'digital data', default='{}'),
programme_id, fatal=False)
page_info = digital_data.get('page', {}).get('pageInfo', {})
title = page_info.get('pageName') or self._og_search_title(webpage)
description = page_info.get('description') or self._og_search_description(webpage)
timestamp = parse_iso8601(page_info.get('publicationDate')) or timestamp
return {
'id': programme_id,
'title': title,
'description': description,
'timestamp': timestamp,
'formats': formats,
'subtitles': subtitles,
}
# Morph based embed (e.g. http://www.bbc.co.uk/sport/live/olympics/36895975)
# There are several setPayload calls may be present but the video
# seems to be always related to the first one
morph_payload = self._parse_json(
self._search_regex(
r'Morph\.setPayload\([^,]+,\s*({.+?})\);',
webpage, 'morph payload', default='{}'),
playlist_id, fatal=False)
if morph_payload:
components = try_get(morph_payload, lambda x: x['body']['components'], list) or []
for component in components:
if not isinstance(component, dict):
continue
lead_media = try_get(component, lambda x: x['props']['leadMedia'], dict)
if not lead_media:
continue
identifiers = lead_media.get('identifiers')
if not identifiers or not isinstance(identifiers, dict):
continue
programme_id = identifiers.get('vpid') or identifiers.get('playablePid')
if not programme_id:
continue
title = lead_media.get('title') or self._og_search_title(webpage)
formats, subtitles = self._download_media_selector(programme_id)
self._sort_formats(formats)
description = lead_media.get('summary')
uploader = lead_media.get('masterBrand')
uploader_id = lead_media.get('mid')
duration = None
duration_d = lead_media.get('duration')
if isinstance(duration_d, dict):
duration = parse_duration(dict_get(
duration_d, ('rawDuration', 'formattedDuration', 'spokenDuration')))
return {
'id': programme_id,
'title': title,
'description': description,
'duration': duration,
'uploader': uploader,
'uploader_id': uploader_id,
'formats': formats,
'subtitles': subtitles,
}
preload_state = self._parse_json(self._search_regex(
r'window\.__PRELOADED_STATE__\s*=\s*({.+?});', webpage,
'preload state', default='{}'), playlist_id, fatal=False)
if preload_state:
current_programme = preload_state.get('programmes', {}).get('current') or {}
programme_id = current_programme.get('id')
if current_programme and programme_id and current_programme.get('type') == 'playable_item':
title = current_programme.get('titles', {}).get('tertiary') or playlist_title
formats, subtitles = self._download_media_selector(programme_id)
self._sort_formats(formats)
synopses = current_programme.get('synopses') or {}
network = current_programme.get('network') or {}
duration = int_or_none(
current_programme.get('duration', {}).get('value'))
thumbnail = None
image_url = current_programme.get('image_url')
if image_url:
thumbnail = image_url.replace('{recipe}', '1920x1920')
return {
'id': programme_id,
'title': title,
'description': dict_get(synopses, ('long', 'medium', 'short')),
'thumbnail': thumbnail,
'duration': duration,
'uploader': network.get('short_title'),
'uploader_id': network.get('id'),
'formats': formats,
'subtitles': subtitles,
}
bbc3_config = self._parse_json(
self._search_regex(
r'(?s)bbcthreeConfig\s*=\s*({.+?})\s*;\s*<', webpage,
'bbcthree config', default='{}'),
playlist_id, transform_source=js_to_json, fatal=False)
if bbc3_config:
bbc3_playlist = try_get(
bbc3_config, lambda x: x['payload']['content']['bbcMedia']['playlist'],
dict)
if bbc3_playlist:
playlist_title = bbc3_playlist.get('title') or playlist_title
thumbnail = bbc3_playlist.get('holdingImageURL')
entries = []
for bbc3_item in bbc3_playlist['items']:
programme_id = bbc3_item.get('versionID')
if not programme_id:
continue
formats, subtitles = self._download_media_selector(programme_id)
self._sort_formats(formats)
entries.append({
'id': programme_id,
'title': playlist_title,
'thumbnail': thumbnail,
'timestamp': timestamp,
'formats': formats,
'subtitles': subtitles,
})
return self.playlist_result(
entries, playlist_id, playlist_title, playlist_description)
def extract_all(pattern):
return list(filter(None, map(
lambda s: self._parse_json(s, playlist_id, fatal=False),
re.findall(pattern, webpage))))
# Multiple video article (e.g.
# http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460)
EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+%s(?:\b[^"]+)?' % self._ID_REGEX
entries = []
for match in extract_all(r'new\s+SMP\(({.+?})\)'):
embed_url = match.get('playerSettings', {}).get('externalEmbedUrl')
if embed_url and re.match(EMBED_URL, embed_url):
entries.append(embed_url)
entries.extend(re.findall(
r'setPlaylist\("(%s)"\)' % EMBED_URL, webpage))
if entries:
return self.playlist_result(
[self.url_result(entry_, 'BBCCoUk') for entry_ in entries],
playlist_id, playlist_title, playlist_description)
# Multiple video article (e.g. http://www.bbc.com/news/world-europe-32668511)
medias = extract_all(r"data-media-meta='({[^']+})'")
if not medias:
# Single video article (e.g. http://www.bbc.com/news/video_and_audio/international)
media_asset = self._search_regex(
r'mediaAssetPage\.init\(\s*({.+?}), "/',
webpage, 'media asset', default=None)
if media_asset:
media_asset_page = self._parse_json(media_asset, playlist_id, fatal=False)
medias = []
for video in media_asset_page.get('videos', {}).values():
medias.extend(video.values())
if not medias:
# Multiple video playlist with single `now playing` entry (e.g.
# http://www.bbc.com/news/video_and_audio/must_see/33767813)
vxp_playlist = self._parse_json(
self._search_regex(
r'<script[^>]+class="vxp-playlist-data"[^>]+type="application/json"[^>]*>([^<]+)</script>',
webpage, 'playlist data'),
playlist_id)
playlist_medias = []
for item in vxp_playlist:
media = item.get('media')
if not media:
continue
playlist_medias.append(media)
# Download single video if found media with asset id matching the video id from URL
if item.get('advert', {}).get('assetId') == playlist_id:
medias = [media]
break
# Fallback to the whole playlist
if not medias:
medias = playlist_medias
entries = []
for num, media_meta in enumerate(medias, start=1):
formats, subtitles = self._extract_from_media_meta(media_meta, playlist_id)
if not formats:
continue
self._sort_formats(formats)
video_id = media_meta.get('externalId')
if not video_id:
video_id = playlist_id if len(medias) == 1 else '%s-%s' % (playlist_id, num)
title = media_meta.get('caption')
if not title:
title = playlist_title if len(medias) == 1 else '%s - Video %s' % (playlist_title, num)
duration = int_or_none(media_meta.get('durationInSeconds')) or parse_duration(media_meta.get('duration'))
images = []
for image in media_meta.get('images', {}).values():
images.extend(image.values())
if 'image' in media_meta:
images.append(media_meta['image'])
thumbnails = [{
'url': image.get('href'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in images]
entries.append({
'id': video_id,
'title': title,
'thumbnails': thumbnails,
'duration': duration,
'timestamp': timestamp,
'formats': formats,
'subtitles': subtitles,
})
return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
class BBCCoUkArticleIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/programmes/articles/(?P<id>[a-zA-Z0-9]+)'
IE_NAME = 'bbc.co.uk:article'
IE_DESC = 'BBC articles'
_TEST = {
'url': 'http://www.bbc.co.uk/programmes/articles/3jNQLTMrPlYGTBn0WV6M2MS/not-your-typical-role-model-ada-lovelace-the-19th-century-programmer',
'info_dict': {
'id': '3jNQLTMrPlYGTBn0WV6M2MS',
'title': 'Calculating Ada: The Countess of Computing - Not your typical role model: Ada Lovelace the 19th century programmer - BBC Four',
'description': 'Hannah Fry reveals some of her surprising discoveries about Ada Lovelace during filming.',
},
'playlist_count': 4,
'add_ie': ['BBCCoUk'],
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage).strip()
entries = [self.url_result(programme_url) for programme_url in re.findall(
r'<div[^>]+typeof="Clip"[^>]+resource="([^"]+)"', webpage)]
return self.playlist_result(entries, playlist_id, title, description)
class BBCCoUkPlaylistBaseIE(InfoExtractor):
def _entries(self, webpage, url, playlist_id):
single_page = 'page' in compat_urlparse.parse_qs(
compat_urlparse.urlparse(url).query)
for page_num in itertools.count(2):
for video_id in re.findall(
self._VIDEO_ID_TEMPLATE % BBCCoUkIE._ID_REGEX, webpage):
yield self.url_result(
self._URL_TEMPLATE % video_id, BBCCoUkIE.ie_key())
if single_page:
return
next_page = self._search_regex(
r'<li[^>]+class=(["\'])pagination_+next\1[^>]*><a[^>]+href=(["\'])(?P<url>(?:(?!\2).)+)\2',
webpage, 'next page url', default=None, group='url')
if not next_page:
break
webpage = self._download_webpage(
compat_urlparse.urljoin(url, next_page), playlist_id,
'Downloading page %d' % page_num, page_num)
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
title, description = self._extract_title_and_description(webpage)
return self.playlist_result(
self._entries(webpage, url, playlist_id),
playlist_id, title, description)
class BBCCoUkIPlayerPlaylistIE(BBCCoUkPlaylistBaseIE):
IE_NAME = 'bbc.co.uk:iplayer:playlist'
_VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/iplayer/(?:episodes|group)/(?P<id>%s)' % BBCCoUkIE._ID_REGEX
_URL_TEMPLATE = 'http://www.bbc.co.uk/iplayer/episode/%s'
_VIDEO_ID_TEMPLATE = r'data-ip-id=["\'](%s)'
_TESTS = [{
'url': 'http://www.bbc.co.uk/iplayer/episodes/b05rcz9v',
'info_dict': {
'id': 'b05rcz9v',
'title': 'The Disappearance',
'description': 'French thriller serial about a missing teenager.',
},
'playlist_mincount': 6,
'skip': 'This programme is not currently available on BBC iPlayer',
}, {
# Available for over a year unlike 30 days for most other programmes
'url': 'http://www.bbc.co.uk/iplayer/group/p02tcc32',
'info_dict': {
'id': 'p02tcc32',
'title': 'Bohemian Icons',
'description': 'md5:683e901041b2fe9ba596f2ab04c4dbe7',
},
'playlist_mincount': 10,
}]
def _extract_title_and_description(self, webpage):
title = self._search_regex(r'<h1>([^<]+)</h1>', webpage, 'title', fatal=False)
description = self._search_regex(
r'<p[^>]+class=(["\'])subtitle\1[^>]*>(?P<value>[^<]+)</p>',
webpage, 'description', fatal=False, group='value')
return title, description
class BBCCoUkPlaylistIE(BBCCoUkPlaylistBaseIE):
IE_NAME = 'bbc.co.uk:playlist'
_VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/programmes/(?P<id>%s)/(?:episodes|broadcasts|clips)' % BBCCoUkIE._ID_REGEX
_URL_TEMPLATE = 'http://www.bbc.co.uk/programmes/%s'
_VIDEO_ID_TEMPLATE = r'data-pid=["\'](%s)'
_TESTS = [{
'url': 'http://www.bbc.co.uk/programmes/b05rcz9v/clips',
'info_dict': {
'id': 'b05rcz9v',
'title': 'The Disappearance - Clips - BBC Four',
'description': 'French thriller serial about a missing teenager.',
},
'playlist_mincount': 7,
}, {
# multipage playlist, explicit page
'url': 'http://www.bbc.co.uk/programmes/b00mfl7n/clips?page=1',
'info_dict': {
'id': 'b00mfl7n',
'title': 'Frozen Planet - Clips - BBC One',
'description': 'md5:65dcbf591ae628dafe32aa6c4a4a0d8c',
},
'playlist_mincount': 24,
}, {
# multipage playlist, all pages
'url': 'http://www.bbc.co.uk/programmes/b00mfl7n/clips',
'info_dict': {
'id': 'b00mfl7n',
'title': 'Frozen Planet - Clips - BBC One',
'description': 'md5:65dcbf591ae628dafe32aa6c4a4a0d8c',
},
'playlist_mincount': 142,
}, {
'url': 'http://www.bbc.co.uk/programmes/b05rcz9v/broadcasts/2016/06',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/programmes/b05rcz9v/clips',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/programmes/b055jkys/episodes/player',
'only_matching': True,
}]
def _extract_title_and_description(self, webpage):
title = self._og_search_title(webpage, fatal=False)
description = self._og_search_description(webpage)
return title, description
|
kidburglar/youtube-dl
|
youtube_dl/extractor/bbc.py
|
Python
|
unlicense
| 59,081
|
[
"VisIt"
] |
9d7a37a3e8fd1312a7d42c280424d4460170ed23839a4c9b5fe233108a1c421e
|
import matplotlib.pyplot as plt
import numpy as np
import glob
# delete the headers for every timestep (snapshot)
'''
sed -i '/ITEM: TIMESTEP/,+8d' dump.test
'''
natoms = 86718 #2023 21054 9126
nsnaps = 31 #no. of snaps
filename = 'dump.test' # lammps dump file after processing with 'sed' command
flist = glob.glob(filename)
for f in flist:
load = np.genfromtxt(f, dtype=float, usecols=(1,2,3,4)) #atomtype, x, y, z
data=np.array(load)
data_split = np.zeros((nsnaps, natoms, 4))
data_split [:] = np.split(data,nsnaps)
# check data_splits again by printing it
print data_split [0,1,1] #snap, row, column
# write the new dump file containing only the atoms needed
natoms_new = 35971 #number of atoms after deletion
xlo = -6.684e-2 #-2.061e-1 #4.2366e-1 #1.5297e-1
xhi = 1.7819e2 #1.787e2 #1.77316e2 #1.765e2
ylo = 4.284e-1 #5.0795e-1 #5.7174e-1 #8.7306e-2
yhi = 8.919599e1 #8.904e1 #8.877e1 #8.82078e1
zlo = 4.1064e-1 #6.53536e-1 #1.3324855 #1.426e-1
zhi = 8.92e1 #8.888e1 #8.8e1 #8.7307e1
init_length = abs(xhi-xlo)
outFile = open('dump.GPCSH2.0_deleted', 'w')
for s in range(nsnaps):
#outFile = open('dump.GPCSH1.2_deleted', 'a')
outFile.write('ITEM: TIMESTEP \n')
outFile.write('%i \n' %(s))
outFile.write('ITEM: NUMBER OF ATOMS \n')
outFile.write('%i \n' %(natoms_new))
outFile.write('ITEM: BOX BOUNDS pp pp pp \n')
outFile.write('%f %f \n' %(xlo-0.005*s*init_length, xhi+0.005*s*init_length))
outFile.write('%f %f \n' %(ylo, yhi))
outFile.write('%f %f \n' %(zlo, zhi))
outFile.write('ITEM: ATOMS id type xs ys zs \n')
for i in range(natoms):
if int(data_split [s,i,0]) == 1:
outFile.write('%i %i %f %f %f \n' %(i+1, data_split [s,i,0], data_split [s,i,1], data_split [s,i,2], data_split [s,i,3]))
elif int(data_split [s,i,0]) == 2:
outFile.write('%i %i %f %f %f \n' %(i+1, data_split [s,i,0], data_split [s,i,1], data_split [s,i,2], data_split [s,i,3]))
elif int(data_split [s,i,0]) == 10:
outFile.write('%i %i %f %f %f \n' %(i+1, 10, data_split [s,i,1], data_split [s,i,2], data_split [s,i,3]))
elif int(data_split [s,i,0]) == 11:
outFile.write('%i %i %f %f %f \n' %(i+1, 11, data_split [s,i,1], data_split [s,i,2], data_split [s,i,3]))
outFile.close()
print "All done!"
|
msadat/python-scripts
|
del_types_dump_write.py
|
Python
|
gpl-3.0
| 2,281
|
[
"LAMMPS"
] |
4c0489be1ac16a750627affcc4aabad36f69b67fc68131bad710358d565786d5
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Implementation of defect correction methods.
"""
import logging
import matplotlib.pyplot as plt
import numpy as np
import scipy
from scipy import stats
from pymatgen.analysis.defects.core import DefectCorrection
from pymatgen.analysis.defects.utils import (
QModel,
ang_to_bohr,
converge,
eV_to_k,
generate_R_and_G_vecs,
generate_reciprocal_vectors_squared,
hart_to_ev,
kumagai_to_V,
tune_for_gamma,
)
__author__ = "Danny Broberg, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "shyamd@lbl.gov"
__status__ = "Development"
__date__ = "Mar 15, 2018"
logger = logging.getLogger(__name__)
class FreysoldtCorrection(DefectCorrection):
"""
A class for FreysoldtCorrection class. Largely adapted from PyCDT code
If this correction is used, please reference Freysoldt's original paper.
doi: 10.1103/PhysRevLett.102.016402
"""
def __init__(
self,
dielectric_const,
q_model=None,
energy_cutoff=520,
madetol=0.0001,
axis=None,
):
"""
Initializes the FreysoldtCorrection class
Args:
dielectric_const (float or 3x3 matrix): Dielectric constant for the structure
q_model (QModel): instantiated QModel object or None.
Uses default parameters to instantiate QModel if None supplied
energy_cutoff (int): Maximum energy in eV in reciprocal space to perform
integration for potential correction.
madetol (float): Convergence criterion for the Madelung energy in the potential correction
axis (int): Axis to calculate correction.
If axis is None, then averages over all three axes is performed.
"""
self.q_model = QModel() if not q_model else q_model
self.energy_cutoff = energy_cutoff
self.madetol = madetol
self.dielectric_const = dielectric_const
if isinstance(dielectric_const, (int, float)):
self.dielectric = float(dielectric_const)
else:
self.dielectric = float(np.mean(np.diag(dielectric_const)))
self.axis = axis
self.metadata = {"pot_plot_data": {}, "pot_corr_uncertainty_md": {}}
def get_correction(self, entry):
"""
Gets the Freysoldt correction for a defect entry
Args:
entry (DefectEntry): defect entry to compute Freysoldt correction on.
Requires following keys to exist in DefectEntry.parameters dict:
axis_grid (3 x NGX where NGX is the length of the NGX grid
in the x,y and z axis directions. Same length as planar
average lists):
A list of 3 numpy arrays which contain the cartesian axis
values (in angstroms) that correspond to each planar avg
potential supplied.
bulk_planar_averages (3 x NGX where NGX is the length of
the NGX grid in the x,y and z axis directions.):
A list of 3 numpy arrays which contain the planar averaged
electrostatic potential for the bulk supercell.
defect_planar_averages (3 x NGX where NGX is the length of
the NGX grid in the x,y and z axis directions.):
A list of 3 numpy arrays which contain the planar averaged
electrostatic potential for the defective supercell.
initial_defect_structure (Structure) structure corresponding to
initial defect supercell structure (uses Lattice for charge correction)
defect_frac_sc_coords (3 x 1 array) Fractional co-ordinates of
defect location in supercell structure
Returns:
FreysoldtCorrection values as a dictionary
"""
if self.axis is None:
list_axis_grid = np.array(entry.parameters["axis_grid"])
list_bulk_plnr_avg_esp = np.array(entry.parameters["bulk_planar_averages"])
list_defect_plnr_avg_esp = np.array(entry.parameters["defect_planar_averages"])
list_axes = range(len(list_axis_grid))
else:
list_axes = np.array(self.axis)
list_axis_grid, list_bulk_plnr_avg_esp, list_defect_plnr_avg_esp = (
[],
[],
[],
)
for ax in list_axes:
list_axis_grid.append(np.array(entry.parameters["axis_grid"][ax]))
list_bulk_plnr_avg_esp.append(np.array(entry.parameters["bulk_planar_averages"][ax]))
list_defect_plnr_avg_esp.append(np.array(entry.parameters["defect_planar_averages"][ax]))
lattice = entry.parameters["initial_defect_structure"].lattice.copy()
defect_frac_coords = entry.parameters["defect_frac_sc_coords"]
q = entry.defect.charge
es_corr = self.perform_es_corr(lattice, entry.charge)
pot_corr_tracker = []
for x, pureavg, defavg, axis in zip(
list_axis_grid, list_bulk_plnr_avg_esp, list_defect_plnr_avg_esp, list_axes
):
tmp_pot_corr = self.perform_pot_corr(
x,
pureavg,
defavg,
lattice,
entry.charge,
defect_frac_coords,
axis,
widthsample=1.0,
)
pot_corr_tracker.append(tmp_pot_corr)
pot_corr = np.mean(pot_corr_tracker)
entry.parameters["freysoldt_meta"] = dict(self.metadata)
entry.parameters["potalign"] = pot_corr / (-q) if q else 0.0
return {
"freysoldt_electrostatic": es_corr,
"freysoldt_potential_alignment": pot_corr,
}
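# Illustrative sketch (added for clarity; not part of pymatgen): the minimal shape of the
# DefectEntry.parameters dict that get_correction() above expects. Every name and value
# below is a placeholder, not reference data.
# entry.parameters.update({
#     "axis_grid": [x_a, x_b, x_c],                   # three arrays of cartesian coords (Angstrom)
#     "bulk_planar_averages": [vb_a, vb_b, vb_c],     # planar-averaged bulk ESP, one array per axis
#     "defect_planar_averages": [vd_a, vd_b, vd_c],   # planar-averaged defect ESP, one array per axis
#     "initial_defect_structure": defect_structure,   # pymatgen Structure of the unrelaxed defect cell
#     "defect_frac_sc_coords": [0.5, 0.5, 0.5],       # fractional defect position in the supercell
# })
# correction = FreysoldtCorrection(dielectric_const=10.0).get_correction(entry)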
def perform_es_corr(self, lattice, q, step=1e-4):
"""
Perform Electrostatic Freysoldt Correction
Args:
lattice: Pymatgen lattice object
q (int): Charge of defect
step (float): step size for numerical integration
Return:
Electrostatic Point Charge contribution to Freysoldt Correction (float)
"""
logger.info("Running Freysoldt 2011 PC calculation (should be " "equivalent to sxdefectalign)")
logger.debug("defect lattice constants are (in angstroms)" + str(lattice.abc))
[a1, a2, a3] = ang_to_bohr * np.array(lattice.get_cartesian_coords(1))
logging.debug("In atomic units, lat consts are (in bohr):" + str([a1, a2, a3]))
vol = np.dot(a1, np.cross(a2, a3)) # vol in bohr^3
def e_iso(encut):
gcut = eV_to_k(encut) # gcut is in units of 1/A
return scipy.integrate.quad(lambda g: self.q_model.rho_rec(g * g) ** 2, step, gcut)[0] * (q ** 2) / np.pi
def e_per(encut):
eper = 0
for g2 in generate_reciprocal_vectors_squared(a1, a2, a3, encut):
eper += (self.q_model.rho_rec(g2) ** 2) / g2
eper *= (q ** 2) * 2 * round(np.pi, 6) / vol
eper += (q ** 2) * 4 * round(np.pi, 6) * self.q_model.rho_rec_limit0 / vol
return eper
eiso = converge(e_iso, 5, self.madetol, self.energy_cutoff)
logger.debug("Eisolated : %f", round(eiso, 5))
eper = converge(e_per, 5, self.madetol, self.energy_cutoff)
logger.info("Eperiodic : %f hartree", round(eper, 5))
logger.info("difference (periodic-iso) is %f hartree", round(eper - eiso, 6))
logger.info("difference in (eV) is %f", round((eper - eiso) * hart_to_ev, 4))
es_corr = round((eiso - eper) / self.dielectric * hart_to_ev, 6)
logger.info("Defect Correction without alignment %f (eV): ", es_corr)
return es_corr
def perform_pot_corr(
self,
axis_grid,
pureavg,
defavg,
lattice,
q,
defect_frac_position,
axis,
widthsample=1.0,
):
"""
For performing planar averaging potential alignment
Args:
axis_grid (1 x NGX where NGX is the length of the NGX grid
in the axis direction. Same length as pureavg list):
A numpy array which contain the cartesian axis
values (in angstroms) that correspond to each planar avg
potential supplied.
pureavg (1 x NGX where NGX is the length of the NGX grid in
the axis direction.):
A numpy array for the planar averaged
electrostatic potential of the bulk supercell.
defavg (1 x NGX where NGX is the length of the NGX grid in
the axis direction.):
A numpy array for the planar averaged
electrostatic potential of the defect supercell.
lattice: Pymatgen Lattice object of the defect supercell
q (float or int): charge of the defect
defect_frac_position: Fractional coordinates of the defect in the supercell
axis (int): axis for performing the freysoldt correction on
widthsample (float): width (in Angstroms) of the region in between defects
where the potential alignment correction is averaged. Default is 1 Angstrom.
Returns:
Potential Alignment contribution to Freysoldt Correction (float)
"""
logging.debug("run Freysoldt potential alignment method for axis " + str(axis))
nx = len(axis_grid)
# shift these planar averages to have defect at origin
axfracval = defect_frac_position[axis]
axbulkval = axfracval * lattice.abc[axis]
if axbulkval < 0:
axbulkval += lattice.abc[axis]
elif axbulkval > lattice.abc[axis]:
axbulkval -= lattice.abc[axis]
if axbulkval:
for i in range(nx):
if axbulkval < axis_grid[i]:
break
rollind = len(axis_grid) - i
pureavg = np.roll(pureavg, rollind)
defavg = np.roll(defavg, rollind)
# if not self._silence:
logger.debug("calculating lr part along planar avg axis")
reci_latt = lattice.reciprocal_lattice
dg = reci_latt.abc[axis]
dg /= ang_to_bohr # convert to bohr to do calculation in atomic units
# Build background charge potential with defect at origin
v_G = np.empty(len(axis_grid), np.dtype("c16"))
v_G[0] = 4 * np.pi * -q / self.dielectric * self.q_model.rho_rec_limit0
g = np.roll(np.arange(-nx / 2, nx / 2, 1, dtype=int), int(nx / 2)) * dg
g2 = np.multiply(g, g)[1:]
v_G[1:] = 4 * np.pi / (self.dielectric * g2) * -q * self.q_model.rho_rec(g2)
v_G[nx // 2] = 0 if not (nx % 2) else v_G[nx // 2]
# Get the real-space potential by performing an FFT; the imaginary part should vanish (checked below) and only the real part is kept
v_R = np.fft.fft(v_G)
if abs(np.imag(v_R).max()) > self.madetol:
raise Exception("imaginary part found to be %s", repr(np.imag(v_R).max()))
v_R /= lattice.volume * ang_to_bohr ** 3
v_R = np.real(v_R) * hart_to_ev
# get correction
short = np.array(defavg) - np.array(pureavg) - np.array(v_R)
checkdis = int((widthsample / 2) / (axis_grid[1] - axis_grid[0]))
mid = int(len(short) / 2)
tmppot = [short[i] for i in range(mid - checkdis, mid + checkdis + 1)]
logger.debug("shifted defect position on axis (%s) to origin", repr(axbulkval))
logger.debug(
"means sampling region is (%f,%f)",
axis_grid[mid - checkdis],
axis_grid[mid + checkdis],
)
C = -np.mean(tmppot)
logger.debug("C = %f", C)
final_shift = [short[j] + C for j in range(len(v_R))]
v_R = [elmnt - C for elmnt in v_R]
logger.info("C value is averaged to be %f eV ", C)
logger.info("Potentital alignment energy correction (-q*delta V): %f (eV)", -q * C)
self.pot_corr = -q * C
# log plotting data:
self.metadata["pot_plot_data"][axis] = {
"Vr": v_R,
"x": axis_grid,
"dft_diff": np.array(defavg) - np.array(pureavg),
"final_shift": final_shift,
"check": [mid - checkdis, mid + checkdis + 1],
}
# log uncertainty:
self.metadata["pot_corr_uncertainty_md"][axis] = {
"stats": stats.describe(tmppot)._asdict(),
"potcorr": -q * C,
}
return self.pot_corr
def plot(self, axis, title=None, saved=False):
"""
Plots the planar average electrostatic potential against the Long range and
short range models from Freysoldt. Must run perform_pot_corr or get_correction
(to load metadata) before this can be used.
Args:
axis (int): axis to plot
title (str): Title to be given to plot. Default is no title.
saved (bool): Whether to save file or not. If False then returns plot
object. If True then saves plot as str(title) + "FreyplnravgPlot.pdf"
"""
if not self.metadata["pot_plot_data"]:
raise ValueError("Cannot plot potential alignment before running correction!")
x = self.metadata["pot_plot_data"][axis]["x"]
v_R = self.metadata["pot_plot_data"][axis]["Vr"]
dft_diff = self.metadata["pot_plot_data"][axis]["dft_diff"]
final_shift = self.metadata["pot_plot_data"][axis]["final_shift"]
check = self.metadata["pot_plot_data"][axis]["check"]
plt.figure()
plt.clf()
plt.plot(x, v_R, c="green", zorder=1, label="long range from model")
plt.plot(x, dft_diff, c="red", label="DFT locpot diff")
plt.plot(x, final_shift, c="blue", label="short range (aligned)")
tmpx = [x[i] for i in range(check[0], check[1])]
plt.fill_between(tmpx, -100, 100, facecolor="red", alpha=0.15, label="sampling region")
plt.xlim(round(x[0]), round(x[-1]))
ymin = min(min(v_R), min(dft_diff), min(final_shift))
ymax = max(max(v_R), max(dft_diff), max(final_shift))
plt.ylim(-0.2 + ymin, 0.2 + ymax)
plt.xlabel(r"distance along axis ($\AA$)", fontsize=15)
plt.ylabel("Potential (V)", fontsize=15)
plt.legend(loc=9)
plt.axhline(y=0, linewidth=0.2, color="black")
plt.title(str(title) + " defect potential", fontsize=18)
plt.xlim(0, max(x))
if saved:
plt.savefig(str(title) + "FreyplnravgPlot.pdf")
return None
return plt
class KumagaiCorrection(DefectCorrection):
"""
A class for KumagaiCorrection class. Largely adapted from PyCDT code
If this correction is used, please reference Kumagai and Oba's original paper
(doi: 10.1103/PhysRevB.89.195205) as well as Freysoldt's original
paper (doi: 10.1103/PhysRevLett.102.016402)
NOTE that equations 8 and 9 from Kumagai et al. reference are divided by (4 pi) to get SI units
"""
def __init__(self, dielectric_tensor, sampling_radius=None, gamma=None):
"""
Initializes the Kumagai Correction
Args:
dielectric_tensor (float or 3x3 matrix): Dielectric constant for the structure
optional data that can be tuned:
sampling_radius (float): radius (in Angstrom) which sites must be outside
of to be included in the correction. Publication by Kumagai advises to
use Wigner-Seitz radius of defect supercell, so this is default value.
gamma (float): convergence parameter for gamma function.
Code will automatically determine this if set to None.
"""
self.metadata = {
"gamma": gamma,
"sampling_radius": sampling_radius,
"potalign": None,
}
if isinstance(dielectric_tensor, (int, float)):
self.dielectric = np.identity(3) * dielectric_tensor
else:
self.dielectric = np.array(dielectric_tensor)
def get_correction(self, entry):
"""
Gets the Kumagai correction for a defect entry
Args:
entry (DefectEntry): defect entry to compute Kumagai correction on.
Requires following parameters in the DefectEntry to exist:
bulk_atomic_site_averages (list): list of bulk structure"s atomic site averaged ESPs * charge,
in same order as indices of bulk structure
note this is list given by VASP's OUTCAR (so it is multiplied by a test charge of -1)
defect_atomic_site_averages (list): list of defect structure"s atomic site averaged ESPs * charge,
in same order as indices of defect structure
note this is list given by VASP's OUTCAR (so it is multiplied by a test charge of -1)
site_matching_indices (list): list of corresponding site index values for
bulk and defect site structures EXCLUDING the defect site itself
(ex. [[bulk structure site index, defect structure"s corresponding site index], ... ]
initial_defect_structure (Structure): Pymatgen Structure object representing un-relaxed defect
structure
defect_frac_sc_coords (array): Defect Position in fractional coordinates of the supercell
given in bulk_structure
Returns:
KumagaiCorrection values as a dictionary
"""
bulk_atomic_site_averages = entry.parameters["bulk_atomic_site_averages"]
defect_atomic_site_averages = entry.parameters["defect_atomic_site_averages"]
site_matching_indices = entry.parameters["site_matching_indices"]
defect_sc_structure = entry.parameters["initial_defect_structure"]
defect_frac_sc_coords = entry.parameters["defect_frac_sc_coords"]
lattice = defect_sc_structure.lattice
volume = lattice.volume
q = entry.defect.charge
if not self.metadata["gamma"]:
self.metadata["gamma"] = tune_for_gamma(lattice, self.dielectric)
prec_set = [25, 28]
g_vecs, recip_summation, r_vecs, real_summation = generate_R_and_G_vecs(
self.metadata["gamma"], prec_set, lattice, self.dielectric
)
pot_shift = self.get_potential_shift(self.metadata["gamma"], volume)
si = self.get_self_interaction(self.metadata["gamma"])
es_corr = [(real_summation[ind] + recip_summation[ind] + pot_shift + si) for ind in range(2)]
# increase precision if correction is not converged yet
# TODO: allow for larger prec_set to be tried if this fails
if abs(es_corr[0] - es_corr[1]) > 0.0001:
logger.debug(
"Es_corr summation not converged! ({} vs. {})\nTrying a larger prec_set...".format(
es_corr[0], es_corr[1]
)
)
prec_set = [30, 35]
g_vecs, recip_summation, r_vecs, real_summation = generate_R_and_G_vecs(
self.metadata["gamma"], prec_set, lattice, self.dielectric
)
es_corr = [(real_summation[ind] + recip_summation[ind] + pot_shift + si) for ind in range(2)]
if abs(es_corr[0] - es_corr[1]) > 0.0001:
raise ValueError("Correction still not converged after trying prec_sets up to 35... serious error.")
es_corr = es_corr[0] * -(q ** 2.0) * kumagai_to_V / 2.0 # [eV]
# if no sampling radius specified for pot align, then assuming Wigner-Seitz radius:
if not self.metadata["sampling_radius"]:
wz = lattice.get_wigner_seitz_cell()
dist = []
for facet in wz:
midpt = np.mean(np.array(facet), axis=0)
dist.append(np.linalg.norm(midpt))
self.metadata["sampling_radius"] = min(dist)
# assemble site_list based on matching indices
# [[defect_site object, Vqb for site], .. repeat for all non defective sites]
site_list = []
for bs_ind, ds_ind in site_matching_indices:
Vqb = -(defect_atomic_site_averages[int(ds_ind)] - bulk_atomic_site_averages[int(bs_ind)])
site_list.append([defect_sc_structure[int(ds_ind)], Vqb])
pot_corr = self.perform_pot_corr(
defect_sc_structure,
defect_frac_sc_coords,
site_list,
self.metadata["sampling_radius"],
q,
r_vecs[0],
g_vecs[0],
self.metadata["gamma"],
)
entry.parameters["kumagai_meta"] = dict(self.metadata)
entry.parameters["potalign"] = pot_corr / (-q) if q else 0.0
return {
"kumagai_electrostatic": es_corr,
"kumagai_potential_alignment": pot_corr,
}
def perform_es_corr(self, gamma, prec, lattice, charge):
"""
Perform Electrostatic Kumagai Correction
Args:
gamma (float): Ewald parameter
prec (int): Precision parameter for reciprocal/real lattice vector generation
lattice: Pymatgen Lattice object corresponding to defect supercell
charge (int): Defect charge
Return:
Electrostatic Point Charge contribution to Kumagai Correction (float)
"""
volume = lattice.volume
g_vecs, recip_summation, r_vecs, real_summation = generate_R_and_G_vecs(gamma, [prec], lattice, self.dielectric)
recip_summation = recip_summation[0]
real_summation = real_summation[0]
es_corr = (
recip_summation
+ real_summation
+ self.get_potential_shift(gamma, volume)
+ self.get_self_interaction(gamma)
)
es_corr *= -(charge ** 2.0) * kumagai_to_V / 2.0 # [eV]
return es_corr
def perform_pot_corr(
self,
defect_structure,
defect_frac_coords,
site_list,
sampling_radius,
q,
r_vecs,
g_vecs,
gamma,
):
"""
For performing potential alignment in manner described by Kumagai et al.
Args:
defect_structure: Pymatgen Structure object corresponding to the defect supercell
defect_frac_coords (array): Defect Position in fractional coordinates of the supercell
given in bulk_structure
site_list: list of corresponding site index values for
bulk and defect site structures EXCLUDING the defect site itself
(ex. [[bulk structure site index, defect structure"s corresponding site index], ... ]
sampling_radius (float): radius (in Angstrom) which sites must be outside
of to be included in the correction. Publication by Kumagai advises to
use Wigner-Seitz radius of defect supercell, so this is default value.
q (int): Defect charge
r_vecs: List of real lattice vectors to use in summation
g_vecs: List of reciprocal lattice vectors to use in summation
gamma (float): Ewald parameter
Return:
Potential alignment contribution to Kumagai Correction (float)
"""
volume = defect_structure.lattice.volume
potential_shift = self.get_potential_shift(gamma, volume)
pot_dict = {} # keys will be site index in the defect structure
for_correction = [] # region to sample for correction
# for each atom, do the following:
# (a) get relative_vector from defect_site to site in defect_supercell structure
# (b) recalculate the recip and real summation values based on this r_vec
# (c) get information needed for pot align
for site, Vqb in site_list:
dist, jimage = site.distance_and_image_from_frac_coords(defect_frac_coords)
vec_defect_to_site = defect_structure.lattice.get_cartesian_coords(
site.frac_coords - jimage - defect_frac_coords
)
dist_to_defect = np.linalg.norm(vec_defect_to_site)
if abs(dist_to_defect - dist) > 0.001:
raise ValueError("Error in computing vector to defect")
relative_real_vectors = [r_vec - vec_defect_to_site for r_vec in r_vecs[:]]
real_sum = self.get_real_summation(gamma, relative_real_vectors)
recip_sum = self.get_recip_summation(gamma, g_vecs, volume, r=vec_defect_to_site[:])
Vpc = (real_sum + recip_sum + potential_shift) * kumagai_to_V * q
defect_struct_index = defect_structure.index(site)
pot_dict[defect_struct_index] = {
"Vpc": Vpc,
"Vqb": Vqb,
"dist_to_defect": dist_to_defect,
}
logger.debug("For atom {}\n\tbulk/defect DFT potential difference = " "{}".format(defect_struct_index, Vqb))
logger.debug("\tanisotropic model charge: {}".format(Vpc))
logger.debug("\t\treciprocal part: {}".format(recip_sum * kumagai_to_V * q))
logger.debug("\t\treal part: {}".format(real_sum * kumagai_to_V * q))
logger.debug("\t\tself interaction part: {}".format(potential_shift * kumagai_to_V * q))
logger.debug("\trelative_vector to defect: {}".format(vec_defect_to_site))
if dist_to_defect > sampling_radius:
logger.debug(
"\tdistance to defect is {} which is outside minimum sampling "
"radius {}".format(dist_to_defect, sampling_radius)
)
for_correction.append(Vqb - Vpc)
else:
logger.debug(
"\tdistance to defect is {} which is inside minimum sampling "
"radius {} (so will not include for correction)"
"".format(dist_to_defect, sampling_radius)
)
if len(for_correction):
pot_alignment = np.mean(for_correction)
else:
logger.info("No atoms sampled for_correction radius!" " Assigning potential alignment value of 0.")
pot_alignment = 0.0
self.metadata["potalign"] = pot_alignment
pot_corr = -q * pot_alignment
# log uncertainty stats:
self.metadata["pot_corr_uncertainty_md"] = {
"stats": stats.describe(for_correction)._asdict(),
"number_sampled": len(for_correction),
}
self.metadata["pot_plot_data"] = pot_dict
logger.info("Kumagai potential alignment (site averaging): %f", pot_alignment)
logger.info("Kumagai potential alignment correction energy: %f eV", pot_corr)
return pot_corr
def get_real_summation(self, gamma, real_vectors):
"""
Get real summation term from list of real-space vectors
"""
real_part = 0
invepsilon = np.linalg.inv(self.dielectric)
rd_epsilon = np.sqrt(np.linalg.det(self.dielectric))
for r_vec in real_vectors:
if np.linalg.norm(r_vec) > 1e-8:
loc_res = np.sqrt(np.dot(r_vec, np.dot(invepsilon, r_vec)))
nmr = scipy.special.erfc(gamma * loc_res) # pylint: disable=E1101
real_part += nmr / loc_res
real_part /= 4 * np.pi * rd_epsilon
return real_part
def get_recip_summation(self, gamma, recip_vectors, volume, r=[0.0, 0.0, 0.0]):
"""
Get Reciprocal summation term from list of reciprocal-space vectors
"""
recip_part = 0
for g_vec in recip_vectors:
# dont need to avoid G=0, because it will not be
# in recip list (if generate_R_and_G_vecs is used)
Gdotdiel = np.dot(g_vec, np.dot(self.dielectric, g_vec))
summand = np.exp(-Gdotdiel / (4 * (gamma ** 2))) * np.cos(np.dot(g_vec, r)) / Gdotdiel
recip_part += summand
recip_part /= volume
return recip_part
def get_self_interaction(self, gamma):
"""
Args:
gamma ():
Returns:
Self-interaction energy of defect.
"""
determ = np.linalg.det(self.dielectric)
return -gamma / (2.0 * np.pi * np.sqrt(np.pi * determ))
@staticmethod
def get_potential_shift(gamma, volume):
"""
Args:
gamma (float): Gamma
volume (float): Volume.
Returns:
Potential shift for defect.
"""
return -0.25 / (volume * gamma ** 2.0)
def plot(self, title=None, saved=False):
"""
Plots the AtomicSite electrostatic potential against the Long range and short range models
from Kumagai and Oba (doi: 10.1103/PhysRevB.89.195205)
"""
if "pot_plot_data" not in self.metadata.keys():
raise ValueError("Cannot plot potential alignment before running correction!")
sampling_radius = self.metadata["sampling_radius"]
site_dict = self.metadata["pot_plot_data"]
potalign = self.metadata["potalign"]
plt.figure()
plt.clf()
distances, sample_region = [], []
Vqb_list, Vpc_list, diff_list = [], [], []
for site_ind, site_dict in site_dict.items():
dist = site_dict["dist_to_defect"]
distances.append(dist)
Vqb = site_dict["Vqb"]
Vpc = site_dict["Vpc"]
Vqb_list.append(Vqb)
Vpc_list.append(Vpc)
diff_list.append(Vqb - Vpc)
if dist > sampling_radius:
sample_region.append(Vqb - Vpc)
plt.plot(
distances,
Vqb_list,
color="r",
marker="^",
linestyle="None",
label="$V_{q/b}$",
)
plt.plot(
distances,
Vpc_list,
color="g",
marker="o",
linestyle="None",
label="$V_{pc}$",
)
plt.plot(
distances,
diff_list,
color="b",
marker="x",
linestyle="None",
label="$V_{q/b}$ - $V_{pc}$",
)
x = np.arange(sampling_radius, max(distances) * 1.05, 0.01)
y_max = max(max(Vqb_list), max(Vpc_list), max(diff_list)) + 0.1
y_min = min(min(Vqb_list), min(Vpc_list), min(diff_list)) - 0.1
plt.fill_between(x, y_min, y_max, facecolor="red", alpha=0.15, label="sampling region")
plt.axhline(y=potalign, linewidth=0.5, color="red", label="pot. align. / -q")
plt.legend(loc=0)
plt.axhline(y=0, linewidth=0.2, color="black")
plt.ylim([y_min, y_max])
plt.xlim([0, max(distances) * 1.1])
plt.xlabel(r"Distance from defect ($\AA$)", fontsize=20)
plt.ylabel("Potential (V)", fontsize=20)
plt.title(str(title) + " atomic site potential plot", fontsize=20)
if saved:
plt.savefig(str(title) + "KumagaiESPavgPlot.pdf")
return None
return plt
class BandFillingCorrection(DefectCorrection):
"""
A class for BandFillingCorrection class. Largely adapted from PyCDT code
"""
def __init__(self, resolution=0.01):
"""
Initializes the Bandfilling correction
Args:
resolution (float): energy resolution to maintain for gap states
"""
self.resolution = resolution
self.metadata = {"num_hole_vbm": None, "num_elec_cbm": None, "potalign": None}
def get_correction(self, entry):
"""
Gets the BandFilling correction for a defect entry
Args:
entry (DefectEntry): defect entry to compute BandFilling correction on.
Requires following parameters in the DefectEntry to exist:
eigenvalues
dictionary of defect eigenvalues, as stored in a Vasprun object
kpoint_weights (list of floats)
kpoint weights corresponding to the dictionary of eigenvalues,
as stored in a Vasprun object
potalign (float)
potential alignment for the defect calculation
Only applies to non-zero charge,
When using potential alignment correction (freysoldt or kumagai),
need to divide by -q
cbm (float)
CBM of bulk calculation (or band structure calculation of bulk);
calculated on same level of theory as the defect
(ex. GGA defects -> requires GGA cbm)
vbm (float)
VBM of bulk calculation (or band structure calculation of bulk);
calculated on same level of theory as the defect
(ex. GGA defects -> requires GGA vbm)
Returns:
Bandfilling Correction value as a dictionary
"""
eigenvalues = entry.parameters["eigenvalues"]
kpoint_weights = entry.parameters["kpoint_weights"]
potalign = entry.parameters["potalign"]
vbm = entry.parameters["vbm"]
cbm = entry.parameters["cbm"]
bf_corr = self.perform_bandfill_corr(eigenvalues, kpoint_weights, potalign, vbm, cbm)
entry.parameters["bandfilling_meta"] = dict(self.metadata)
return {"bandfilling_correction": bf_corr}
def perform_bandfill_corr(self, eigenvalues, kpoint_weights, potalign, vbm, cbm):
"""
This calculates the band filling correction based on excess of electrons/holes in CB/VB...
Note that the total free holes and electrons may also be used for a "shallow donor/acceptor"
correction with specified band shifts:
+num_elec_cbm * Delta E_CBM (or -num_hole_vbm * Delta E_VBM)
"""
bf_corr = 0.0
self.metadata["potalign"] = potalign
self.metadata["num_hole_vbm"] = 0.0
self.metadata["num_elec_cbm"] = 0.0
core_occupation_value = list(eigenvalues.values())[0][0][0][1] # get occupation of a core eigenvalue
if len(eigenvalues.keys()) == 1:
# needed because occupation of non-spin calcs is sometimes still 1... should be 2
spinfctr = 2.0 if core_occupation_value == 1.0 else 1.0
elif len(eigenvalues.keys()) == 2:
spinfctr = 1.0
else:
raise ValueError("Eigenvalue keys greater than 2")
# for tracking mid gap states...
shifted_cbm = potalign + cbm # shift cbm with potential alignment
shifted_vbm = potalign + vbm # shift vbm with potential alignment
for spinset in eigenvalues.values():
for kptset, weight in zip(spinset, kpoint_weights):
for eig, occu in kptset: # eig is eigenvalue and occu is occupation
if occu and (eig > shifted_cbm - self.resolution): # donor MB correction
bf_corr += weight * spinfctr * occu * (eig - shifted_cbm) # "move the electrons down"
self.metadata["num_elec_cbm"] += weight * spinfctr * occu
elif (occu != core_occupation_value) and (
eig <= shifted_vbm + self.resolution
): # acceptor MB correction
bf_corr += (
weight * spinfctr * (core_occupation_value - occu) * (shifted_vbm - eig)
) # "move the holes up"
self.metadata["num_hole_vbm"] += weight * spinfctr * (core_occupation_value - occu)
bf_corr *= -1 # need to take negative of this shift for energetic correction
return bf_corr
class BandEdgeShiftingCorrection(DefectCorrection):
"""
A class for BandEdgeShiftingCorrection class. Largely adapted from PyCDT code
"""
def __init__(self):
"""
Initializes the BandEdgeShiftingCorrection class
"""
self.metadata = {
"vbmshift": 0.0,
"cbmshift": 0.0,
}
def get_correction(self, entry):
"""
Gets the BandEdge correction for a defect entry
Args:
entry (DefectEntry): defect entry to compute BandFilling correction on.
Requires some parameters in the DefectEntry to properly function:
hybrid_cbm (float)
CBM of HYBRID bulk calculation one wishes to shift to
hybrid_vbm (float)
VBM of HYBRID bulk calculation one wishes to shift to
cbm (float)
CBM of bulk calculation (or band structure calculation of bulk);
calculated on same level of theory as the defect
(ex. GGA defects -> requires GGA cbm)
vbm (float)
VBM of bulk calculation (or band structure calculation of bulk);
calculated on same level of theory as the defect
(ex. GGA defects -> requires GGA vbm)
Returns:
BandfillingCorrection value as a dictionary
"""
hybrid_cbm = entry.parameters["hybrid_cbm"]
hybrid_vbm = entry.parameters["hybrid_vbm"]
vbm = entry.parameters["vbm"]
cbm = entry.parameters["cbm"]
self.metadata["vbmshift"] = hybrid_vbm - vbm # note vbmshift has UPWARD as positive convention
self.metadata["cbmshift"] = hybrid_cbm - cbm # note cbmshift has UPWARD as positive convention
charge = entry.charge
bandedgeshifting_correction = charge * self.metadata["vbmshift"]
entry.parameters["bandshift_meta"] = dict(self.metadata)
return {"bandedgeshifting_correction": bandedgeshifting_correction}
|
richardtran415/pymatgen
|
pymatgen/analysis/defects/corrections.py
|
Python
|
mit
| 38,404
|
[
"VASP",
"pymatgen"
] |
a62a74149ea1d7658a13ae0cc9135035353ba30c186085447ff993919a8b743d
|
# -*- coding: utf-8 -*-
'''
Created on 2011-01-18\n
The twist angle for G-DNA is defined following a JCTC article whose DOI is 10.1021/ct100253m.
The twist angle is defined using the angle between corresponding C1'-C1' lines in two G-quartet layers.
@version: 0.1.0
@author: zhuh
@change:
- 2011-01-18\n
- Create this file.
- 2011-01-19\n
- finish the function B{Get_twist_in_GDNA()} and test it
- 2011-01-25\n
- modified the function B{Get_twist_in_GDNA()}, \
so both gro and pdb can be used for coor_file.
- add the version to B{0.1.0}
'''
import MDAnalysis
import numpy
import math
import Simple_atom
import usage
import DNA_matrix
import os
def Get_twist_in_GDNA(traj_file,coor_file,base_list_1,base_list_2,output_file):
'''
Input layer 1 (G11, G12, G13, G14) and layer 2 (G21, G22, G23, G24), calculate the angle
between G1i-G1(i+1) and G2i-G2(i+1), and write the result to output_file.\n
B{traj_file:} the GMX trajectory file, in trr or xtc format.\n
B{coor_file:} the GMX coordinate file, in pdb or gro format.\n
B{base_list_1:} the first group containing four guanine bases.\n
B{base_list_2:} the second group containing four guanine bases.\n
B{output_file:} the output file.
'''
C1_list_1=[]
C1_list_2=[]
print " init......"
fp=open(output_file,"w")
fp.write("#Group 1: ")
for i in base_list_1:
fp.write("%d\t " %i)
fp.write("\n")
fp.write("#Group 2: ")
for i in base_list_2:
fp.write("%d\t " %i)
fp.write("\n")
fp.write("#time\t angle_1 \t angle _2 \t angle _3 \t angle_4\n")
Atom_list=Simple_atom.Get_Simple_atom_list(coor_file)
for base in base_list_1:
atom_list=Simple_atom.Get_Atom_in_residue(Atom_list,base)
for atom in atom_list:
if atom.atom_name =="C1'":
C1_list_1.append(atom.atom_serial)
for base in base_list_2:
atom_list=Simple_atom.Get_Atom_in_residue(Atom_list,base)
for atom in atom_list:
if atom.atom_name =="C1'":
C1_list_2.append(atom.atom_serial)
# print C1_list_1
# print C1_list_2
u=MDAnalysis.Universe(coor_file,traj_file)
for ts in u.trajectory:
angle=[]
for i in range(4):
vector1=[ts._x[C1_list_1[(i+1)%4]-1]-ts._x[C1_list_1[i]-1],\
ts._y[C1_list_1[(i+1)%4]-1]-ts._y[C1_list_1[i]-1],\
ts._z[C1_list_1[(i+1)%4]-1]-ts._z[C1_list_1[i]-1]]
vector2=[ts._x[C1_list_2[(i+1)%4]-1]-ts._x[C1_list_2[i]-1],\
ts._y[C1_list_2[(i+1)%4]-1]-ts._y[C1_list_2[i]-1],\
ts._z[C1_list_2[(i+1)%4]-1]-ts._z[C1_list_2[i]-1]]
vector1=numpy.array(vector1)
vector2=numpy.array(vector2)
# print vector1,vector2
gamma=numpy.dot(vector1,vector2)/(math.sqrt(numpy.dot(vector1,vector1)*numpy.dot(vector2,vector2)))
angle.append(math.acos(gamma)/3.1416*180)
fp.write("%6.3f \t %6.3f \t %6.3f \t %6.3f \t %6.3f\n" \
%(ts.time/1000,angle[0],angle[1],angle[2],angle[3]))
#if ts.frame % 100 ==0:
# print " analysis frame %6d......" %ts.frame
usage.echo(" analysis frame %6d......\r" %ts.frame)
fp.close()
print "The result are in the file: ",output_file
def Get_twist_in_GDNA2(traj_file,coor_file,base_list_1,base_list_2,output_name,skip=1,dt=1,begin=0,end=-1):
'''
Input layer 1 (G11, G12, G13, G14) and layer 2 (G21, G22, G23, G24), calculate the angle
between G1i-G1(i+1) and G2i-G2(i+1), and write the result to output_file.\n
B{traj_file:} the GMX trajectory file, in trr or xtc format.\n
B{coor_file:} the GMX coordinate file, in pdb or gro format.\n
B{base_list_1:} the first group containing four guanine bases.\n
B{base_list_2:} the second group containing four guanine bases.\n
B{output_file:} the output file.
'''
LIST_NUM=len(base_list_1)
Atom_list=Simple_atom.Get_Simple_atom_list(coor_file)
residue_list=Simple_atom.Get_Residue_list(Atom_list)
# print residue_list
base_name_list_1=list()
base_name_list_2=list()
base_atom_list_1=list()
base_atom_list_2=list()
C1_list_1=list()
C1_list_2=list()
print " init......"
for i in range(LIST_NUM):
if os.path.isfile(output_name[i]):
print "backup %s to %s" %(output_name[i],"#"+output_name[i]+"#")
try:
os.rename(output_name[i],"#"+output_name[i]+"#")
except OSError,e:
print e
print "the file %s will be overwrited!" %output_name[i]
fp=open(output_name[i],"w")
fp.write("#Group 1: ")
for li in base_list_1[i]:
fp.write("%d\t " %li)
fp.write("\n")
fp.write("#Group 2: ")
for li in base_list_2[i]:
fp.write("%d\t " %li)
fp.write("\n")
fp.write("#time\t angle_1 \t angle _2 \t angle _3 \t angle_4\n")
fp.close()
base_name_list_1.append( [residue_list[j-1] for j in base_list_1[i]])
# print base_name_list_1
base_name_list_2.append( [residue_list[j-1] for j in base_list_2[i]])
# print base_name_list_2
base_atom_list_1.append([DNA_matrix.Get_baseID_list(Atom_list,j) for j in base_list_1[i]])
# print base_atom_list_1
base_atom_list_2.append([DNA_matrix.Get_baseID_list(Atom_list,j) for j in base_list_2[i]])
# print base_atom_list_2
for base in base_list_1[i]:
atom_list=Simple_atom.Get_Atom_in_residue(Atom_list,base)
for atom in atom_list:
if atom.atom_name =="C1'":
C1_list_1.append(atom.atom_serial)
# print C1_list_1
for base in base_list_2[i]:
atom_list=Simple_atom.Get_Atom_in_residue(Atom_list,base)
for atom in atom_list:
if atom.atom_name =="C1'":
C1_list_2.append(atom.atom_serial)
# print C1_list_2
u=MDAnalysis.Universe(coor_file,traj_file)
if traj_file.endswith("mdcrd") or traj_file.endswith("dcd"):
pass
else:
dt=u.trajectory.dt
for ts in u.trajectory:
time=float((ts.frame-1)*dt)
if time < begin:
continue
if time > end and end !=-1:
break
if ts.frame % skip == 0 :
for i in range(LIST_NUM):
r1=[]
'''the group 1 rotate list'''
r2=[]
'''the group 2 rotate list'''
c1=[]
'''the group 1 coordinate list'''
c2=[]
'''the group 2 coordinate list'''
for m in range(len(base_name_list_1[i])):
temp_list = [ [ts._x[x-1], ts._y[x-1], ts._z[x-1]] for x in base_atom_list_1[i][m] ]
result = DNA_matrix.Get_rotate_matrix(numpy.array(temp_list), base_name_list_1[i][m][0])
#base_name_list_1[index of the groups][index of the base of group 1][base_name,base_serial]
c1.append(numpy.array(temp_list))
r1.append(result)
for m in range(len(base_name_list_2[i])):
temp_list = [ [ts._x[x-1], ts._y[x-1], ts._z[x-1]] for x in base_atom_list_2[i][m] ]
result = DNA_matrix.Get_rotate_matrix(numpy.array(temp_list), base_name_list_2[i][m][0])
c2.append(numpy.array(temp_list))
r2.append(result)
orient_group_1,origin_group_1 = DNA_matrix.Get_group_rotmat(r1,len(base_name_list_1[i]))
orient_group_2,origin_group_2 = DNA_matrix.Get_group_rotmat(r2,len(base_name_list_2[i]))
RMSD1=DNA_matrix.Get_group_RMSD(base_name_list_1[i],c1,origin_group_1,orient_group_1)
RMSD2=DNA_matrix.Get_group_RMSD(base_name_list_2[i],c2,origin_group_2,orient_group_2)
angle=[]
for k in range(4):
vector1=[ts._x[C1_list_1[(k+1)%4]-1]-ts._x[C1_list_1[k]-1],\
ts._y[C1_list_1[(k+1)%4]-1]-ts._y[C1_list_1[k]-1],\
ts._z[C1_list_1[(k+1)%4]-1]-ts._z[C1_list_1[k]-1]]
vector2=[ts._x[C1_list_2[(k+1)%4]-1]-ts._x[C1_list_2[k]-1],\
ts._y[C1_list_2[(k+1)%4]-1]-ts._y[C1_list_2[k]-1],\
ts._z[C1_list_2[(k+1)%4]-1]-ts._z[C1_list_2[k]-1]]
vector1=numpy.array(vector1)
vector1_1=numpy.cross(numpy.cross(orient_group_1,vector1),orient_group_1)
vector2=numpy.array(vector2)
vector2_2=numpy.cross(numpy.cross(orient_group_2,vector2),orient_group_2)
# print vector1_1,vector2_2
gamma=numpy.dot(vector1_1,vector2_2)/(math.sqrt(numpy.dot(vector1_1,vector1_1)*numpy.dot(vector2_2,vector2_2)))
angle.append(math.acos(abs(gamma))/3.1416*180)
fp=open(output_name[i],'a')
fp.write("%6.3f \t %6.3f \t %6.3f \t %6.3f \t %6.3f\n" \
%(ts.time/1000,angle[0],angle[1],angle[2],angle[3]))
if ts.frame % 100 ==0 and i ==0:
usage.echo(" analysis frame %6d\r" %ts.frame)
fp.close()
print "The result are in the file: ",output_name
|
zhuhong/g4analysis
|
G4Analysis/G4_twist.py
|
Python
|
gpl-2.0
| 9,527
|
[
"MDAnalysis"
] |
7a5f824b80f70f915297730b5b687e631c7a8e36fd4edd61f7e5fd206011b115
|
'''
Created on Jun 4, 2011
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
import os
import collections
import itertools
import operator
from chimerascan import pysam
from chimerascan.lib import config
from chimerascan.lib.chimera import Chimera, \
DiscordantTags, DISCORDANT_TAG_NAME, \
OrientationTags, ORIENTATION_TAG_NAME, \
DiscordantRead, ChimeraTypes, ChimeraPartner
from chimerascan.lib.gene_to_genome import build_tid_tx_maps
def parse_pairs(bamfh):
bam_iter = iter(bamfh)
try:
while True:
r1 = bam_iter.next()
r2 = bam_iter.next()
yield r1,r2
except StopIteration:
pass
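# Illustrative note (added): parse_pairs assumes mates are stored consecutively, e.g. a
# name-sorted, paired discordant BAM. Minimal usage sketch with an assumed file path:
# bamfh = pysam.Samfile("discordant_reads.srt.bam", "rb")
# for r1, r2 in parse_pairs(bamfh):
#     assert r1.qname == r2.qname   # consecutive records are mates of one fragment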
def parse_gene_chimeric_reads(bamfh):
# create a dictionary structure to hold read pairs
chimera_dict = collections.defaultdict(lambda: [])
for r1,r2 in parse_pairs(bamfh):
#
# TODO:
# for now we are only going to deal with gene-gene
# chimeras and leave other chimeras for study at a
# later time
#
dr1 = r1.opt(DISCORDANT_TAG_NAME)
dr2 = r2.opt(DISCORDANT_TAG_NAME)
if (dr1 != DiscordantTags.DISCORDANT_GENE or
dr2 != DiscordantTags.DISCORDANT_GENE):
continue
# organize key in 5' to 3' order
or1 = r1.opt(ORIENTATION_TAG_NAME)
or2 = r2.opt(ORIENTATION_TAG_NAME)
assert or1 != or2
if or1 == OrientationTags.FIVEPRIME:
pair = (r1,r2)
else:
pair = (r2,r1)
# store pertinent information in lightweight structure
# convert to DiscordantRead objects
r5p = DiscordantRead.from_read(pair[0])
r3p = DiscordantRead.from_read(pair[1])
# keep list of discordant chimeric reads
chimera_dict[(r5p.tid, r3p.tid)].append((r5p,r3p))
for key,pairs in chimera_dict.iteritems():
rname1,rname2 = key
yield rname1, rname2, pairs
def get_chimera_type(fiveprime_gene, threeprime_gene, gene_trees):
"""
return tuple containing ChimeraType and distance
between 5' and 3' genes
"""
# get gene information
chrom5p, start5p, end5p, strand1 = fiveprime_gene.chrom, fiveprime_gene.tx_start, fiveprime_gene.tx_end, fiveprime_gene.strand
chrom3p, start3p, end3p, strand2 = threeprime_gene.chrom, threeprime_gene.tx_start, threeprime_gene.tx_end, threeprime_gene.strand
# interchromosomal
if chrom5p != chrom3p:
return ChimeraTypes.INTERCHROMOSOMAL, None
# orientation
same_strand = strand1 == strand2
# genes on same chromosome so check overlap
is_overlapping = (start5p < end3p) and (start3p < end5p)
if is_overlapping:
if not same_strand:
if ((start5p <= start3p and strand1 == "+") or
(start5p > start3p and strand1 == "-")):
return (ChimeraTypes.OVERLAP_CONVERGE, 0)
else:
return (ChimeraTypes.OVERLAP_DIVERGE, 0)
else:
if ((start5p <= start3p and strand1 == "+") or
(end5p >= end3p and strand1 == "-")):
return (ChimeraTypes.OVERLAP_SAME, 0)
else:
return (ChimeraTypes.OVERLAP_COMPLEX, 0)
# if code gets here then the genes are on the same chromosome but do not
# overlap. first calculate distance (minimum distance between genes)
if start5p <= start3p:
distance = start3p - end5p
between_start,between_end = end5p,start3p
else:
distance = end3p - start5p
between_start,between_end = end3p,start5p
# check whether there are genes intervening between the
# chimera candidates
genes_between = []
genes_between_same_strand = []
for hit in gene_trees[chrom5p].find(between_start,
between_end):
if (hit.start > between_start and
hit.end < between_end):
if hit.strand == strand1:
genes_between_same_strand.append(hit)
genes_between.append(hit)
if same_strand:
if len(genes_between_same_strand) == 0:
return ChimeraTypes.READTHROUGH, distance
else:
return ChimeraTypes.INTRACHROMOSOMAL, distance
else:
# check for reads between neighboring genes
if len(genes_between) == 0:
if ((start5p <= start3p and strand1 == "+") or
(start5p > start3p and strand1 == "-")):
return (ChimeraTypes.ADJ_CONVERGE, distance)
elif ((start5p >= start3p and strand1 == "+") or
(start5p < start3p and strand1 == "-")):
return (ChimeraTypes.ADJ_DIVERGE, distance)
elif ((start5p <= start3p and strand1 == "+") or
(start5p > start3p and strand1 == "-")):
return (ChimeraTypes.ADJ_SAME, distance)
elif ((start5p >= start3p and strand1 == "+") or
(start5p < start3p and strand1 == '-')):
return (ChimeraTypes.ADJ_COMPLEX, distance)
else:
return ChimeraTypes.INTRA_COMPLEX, distance
return ChimeraTypes.UNKNOWN, distance
def read_pairs_to_chimera(chimera_name, tid5p, tid3p, readpairs,
tid_tx_map, genome_tx_trees, trim_bp):
# get gene information
tx5p = tid_tx_map[tid5p]
tx3p = tid_tx_map[tid3p]
# categorize chimera type
chimera_type, distance = get_chimera_type(tx5p, tx3p, genome_tx_trees)
# create chimera object
c = Chimera()
iter5p = itertools.imap(operator.itemgetter(0), readpairs)
iter3p = itertools.imap(operator.itemgetter(1), readpairs)
c.partner5p = ChimeraPartner.from_discordant_reads(iter5p, tx5p, trim_bp)
c.partner3p = ChimeraPartner.from_discordant_reads(iter3p, tx3p, trim_bp)
c.name = chimera_name
c.chimera_type = chimera_type
c.distance = distance
# raw reads
c.encomp_read_pairs = readpairs
return c
def nominate_chimeras(index_dir, input_bam_file, output_file, trim_bp):
logging.debug("Reading gene information")
gene_file = os.path.join(index_dir, config.GENE_FEATURE_FILE)
bamfh = pysam.Samfile(input_bam_file, "rb")
# build a lookup table to get genomic intervals from transcripts
tid_tx_map, genome_tx_trees = build_tid_tx_maps(bamfh, gene_file,
rname_prefix=config.GENE_REF_PREFIX)
# group discordant read pairs by gene
chimera_num = 0
outfh = open(output_file, "w")
logging.debug("Parsing discordant reads")
for tid5p,tid3p,readpairs in parse_gene_chimeric_reads(bamfh):
c = read_pairs_to_chimera("C%07d" % (chimera_num), tid5p, tid3p,
readpairs, tid_tx_map,
genome_tx_trees, trim_bp)
fields = c.to_list()
chimera_num += 1
print >>outfh, '\t'.join(map(str, fields))
outfh.close()
bamfh.close()
def main():
from optparse import OptionParser
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
parser = OptionParser("usage: %prog [options] <index> "
"<discordant_reads.srt.bedpe> <chimeras.txt>")
parser.add_option("--trim", dest="trim", type="int",
default=config.EXON_JUNCTION_TRIM_BP)
options, args = parser.parse_args()
index_dir = args[0]
input_file = args[1]
output_file = args[2]
nominate_chimeras(index_dir, input_file, output_file, options.trim)
if __name__ == '__main__':
main()
|
tectronics/chimerascan
|
chimerascan/deprecated/old_nominate_chimeras.py
|
Python
|
gpl-3.0
| 8,393
|
[
"pysam"
] |
3a3ffba4a608a478548fdda94d2e8159cdebcf7697ed4a2c1e1c7b29bf8de505
|
# -*- coding: utf-8 -*-
'''
Provide authentication using YubiKey.
.. versionadded:: 2015.5.0
:depends: yubico-client Python module
To get your YubiKey API key you will need to visit the website below.
https://upgrade.yubico.com/getapikey/
The resulting page will show the generated Client ID (aka AuthID or API ID)
and the generated API key (Secret Key). Make a note of both and use these
two values in your /etc/salt/master configuration.
/etc/salt/master
.. code-block:: yaml
yubico_users:
damian:
id: 12345
key: ABCDEFGHIJKLMNOPQRSTUVWXYZ
.. code-block:: yaml
external_auth:
yubico:
damian:
- test.*
Please wait five to ten minutes after generating the key before testing so that
the API key will be updated on all the YubiCloud servers.
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import print_function
import logging
log = logging.getLogger(__name__)
try:
from yubico_client import Yubico, yubico_exceptions
HAS_YUBICO = True
except ImportError:
HAS_YUBICO = False
def __get_yubico_users(username):
'''
Grab the YubiKey Client ID & Secret Key
'''
user = {}
try:
if __opts__['yubico_users'].get(username, None):
(user['id'], user['key']) = list(__opts__['yubico_users'][username].values())
else:
return None
except KeyError:
return None
return user
def auth(username, password):
'''
Authenticate against yubico server
'''
_cred = __get_yubico_users(username)
client = Yubico(_cred['id'], _cred['key'])
try:
if client.verify(password):
return True
else:
return False
except yubico_exceptions.StatusCodeError as e:
log.info('Unable to verify YubiKey `{0}`'.format(e))
return False
if __name__ == '__main__':
__opts__ = {'yubico_users': {'damian': {'id': '12345', 'key': 'ABC123'}}}
if auth('damian', 'OPT'):
print("Authenticated")
else:
print("Failed to authenticate")
|
stephane-martin/salt-debian-packaging
|
salt-2016.3.3/salt/auth/yubico.py
|
Python
|
apache-2.0
| 2,096
|
[
"VisIt"
] |
fe6470dd10a207bb6ea7a35c955a26482d32b81af03b1b90e9cc88ae303eae9d
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Get polarizability from X0h server (http://x-server.gmca.aps.anl.gov/x0h.html).
For details see http://x-server.gmca.aps.anl.gov/pub/Stepanov_CR_1991_08.pdf.
"""
from __future__ import division
import math
import re
import requests
X0H_SERVER = 'http://x-server.gmca.aps.anl.gov/cgi/x0h_form.exe'
def calc_bragg_angle(d, energy_eV, n=1):
"""Calculate Bragg angle from the provided energy and d-spacing.
Args:
d (float): interplanar spacing (d-spacing) [A].
energy_eV (float): photon energy [eV].
n (int): number of diffraction peak.
Returns:
dict: the resulted dictionary with:
lamda (float): wavelength [nm].
bragg_angle (float): Bragg angle [rad].
bragg_angle_deg (float): Bragg angle [deg].
"""
# Check/convert types first:
d = float(d)
energy_eV = float(energy_eV)
n = int(n)
lamda = 1239.84193 / energy_eV # lamda in [nm]
bragg_angle = math.asin(n * lamda / (2 * d * 0.1)) # convert d from [A] to [nm].
bragg_angle_deg = 180. / math.pi * bragg_angle
return {
'lamda': lamda,
'bragg_angle': bragg_angle,
'bragg_angle_deg': bragg_angle_deg,
}
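# Illustrative check (added; not part of the original module), assuming the Si(111)
# d-spacing of ~3.1356 A at 10 keV: lambda ~1.24 A, so the Bragg angle is ~11.4 deg.
# print(calc_bragg_angle(d=3.1356, energy_eV=10000)['bragg_angle_deg'])   # ~11.40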
def get_crystal_parameters(material, energy_eV, h, k, l):
"""Obtain parameters for the specified crystal and energy.
Args:
material (str): material full name (e.g., 'Silicon').
energy_eV (float): photon energy [eV].
h (int): Miller's index h.
k (int): Miller's index k.
l (int): Miller's index l.
Returns:
dict: crystal parameters:
d (float): interplanar spacing (d-spacing) [A].
xr0 (float): real part of the 0-th Fourier component of crystal's polarizability.
xi0 (float): imaginary part of the 0-th Fourier component of crystal's polarizability.
xrh (float): real part of the H-th Fourier component of crystal's polarizability (Sigma polarization).
xih (float): imaginary part of the H-th Fourier component of crystal's polarizability (Sigma polarization).
bragg_angle_deg (float): Bragg angle [deg].
"""
# Check/convert types first:
energy_eV = float(energy_eV)
h = int(h)
k = int(k)
l = int(l)
energy_keV = energy_eV / 1000.0 # convert to keV
content = _get_server_data(energy_keV, material, h, k, l)
crystal_parameters = _get_crystal_parameters(content, [h, k, l])
return crystal_parameters
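# Illustrative usage sketch (added): get_crystal_parameters queries the live X0h service,
# so the numbers depend on the server; the call below is an example, not reference output.
# params = get_crystal_parameters('Silicon', 10000, 1, 1, 1)
# print(params['d'], params['xr0'], params['bragg_angle_deg'])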
def _get_crystal_parameters(content, miller_indices=None):
"""Get reflecting planes distance and polarizability from the server's response.
Args:
content: split content of the server's response.
miller_indices: Miller's indices of reflection.
Returns:
dict: crystal parameters.
"""
a1_list = [] # lattice parameter
d_server_list = [] # d-spacing from the server
bragg_angle_list = []
xr0_list = []
xi0_list = []
xrh_list = []
xih_list = []
for row in content:
if re.search('a1=', row):
a1_list.append(row)
elif re.search(' d=', row):
d_server_list.append(row)
elif re.search('QB=', row):
bragg_angle_list.append(row)
elif re.search('xr0=', row):
xr0_list.append(row)
elif re.search('xi0=', row):
xi0_list.append(row)
elif re.search('xrh', row):
xrh_list.append(row)
elif re.search('xih', row):
xih_list.append(row)
assert len(a1_list) > 0
a1 = _parse_xr_xi(a1_list[0])
d_calculated = a1
if miller_indices:
d_calculated /= (sum(n ** 2 for n in miller_indices)) ** 0.5
assert len(d_server_list) > 0
d_server = _parse_xr_xi(d_server_list[0])
assert len(bragg_angle_list) > 0
bragg_angle_deg = _parse_xr_xi(bragg_angle_list[0])
assert len(xr0_list) > 0
xr0 = _parse_xr_xi(xr0_list[0])
xi0 = _parse_xr_xi(xi0_list[0])
xrh = _parse_xr_xi(xrh_list[0])
xih = _parse_xr_xi(xih_list[0])
return {
'a1': a1,
'd': d_calculated,
'd_calculated': d_calculated,
'd_server': d_server,
'bragg_angle_deg': bragg_angle_deg,
'xr0': xr0,
'xi0': xi0,
'xrh': xrh,
'xih': xih,
}
def _get_server_data(energy, material, h, k, l):
"""
The function gets data from the server and splits it by lines.
:param energy: energy [keV].
:param material: material, e.g. Silicon or Germanium
:param h: Miller's index h.
:param k: Miller's index k.
:param l: Miller's index l.
:return content: split server's response.
"""
payload = {
'xway': 2,
'wave': energy,
'coway': 0,
'code': material,
'i1': h,
'i2': k,
'i3': l,
'df1df2': -1,
'modeout': 1,
}
r = requests.get(X0H_SERVER, params=payload, timeout=5)
content = r.text
content = content.split('\n')
return content
def _parse_xr_xi(string):
return float(string.split('=')[-1].strip())
|
mrakitin/sirepo
|
sirepo/crystal.py
|
Python
|
apache-2.0
| 5,120
|
[
"CRYSTAL"
] |
e01e42f0e4e120b867330d089aa0831d05c33b3a80a114051c20ba95b4d0e15a
|
from aces.materials import Material
from aces.modify import get_unique_atoms
from ase import Atoms,Atom
from math import pi,sqrt
from ase.dft.kpoints import ibz_points
from aces import config
from ase.lattice import bulk
import numpy as np
from aces.tools import *
class structure(Material):
def set_parameters(self):
self.cu=False
pass#['Gamma','Y','T','X','Gamma']
def setup(self):
self.forceThick=False
self.elements=['Na','Cl']
self.bandpoints=ibz_points['fcc']
self.bandpath=['Gamma','K','X','Gamma','L']
self.premitive/=np.array([self.latx,self.laty,self.latz])
if self.cu:
self.premitive=np.array([[0,.5,.5],[.5,0,.5],[.5,.5,0]])
def lmp_structure(self):
pos=np.array([[0,0,0],[.5,.5,.5]])
cell=2.8243625205414746*2*np.array([[0,.5,.5],[.5,0,.5],[.5,.5,0]])
atoms = Atoms('NaCl',scaled_positions=pos, cell=cell)
if self.cu:
pos=np.array([
[ 0.0000000000000000, 0.0000000000000000, 0.0000000000000000],
[ 0.0000000000000000, 0.5000000000000000, 0.5000000000000000],
[ 0.5000000000000000, 0.0000000000000000, 0.5000000000000000],
[ 0.5000000000000000, 0.5000000000000000, 0.0000000000000000],
[ 0.5000000000000000, 0.5000000000000000, 0.5000000000000000],
[ 0.5000000000000000, 0.0000000000000000, 0.0000000000000000],
[ 0.0000000000000000, 0.5000000000000000, 0.0000000000000000],
[ 0.0000000000000000, 0.0000000000000000, 0.5000000000000000]
])
cell=2.8243625205414746*2*np.eye(3)
atoms = Atoms('Na4Cl4',scaled_positions=pos, cell=cell)
atoms=atoms.repeat([self.latx,self.laty,self.latz])
atoms.set_pbc([self.xp,self.yp,self.zp])
return atoms
|
vanceeasleaf/aces
|
aces/materials/NaCl.py
|
Python
|
gpl-2.0
| 1,655
|
[
"ASE"
] |
af1cba7bd6427027dea21cffcca74d77bbadf9057340dd44bf3af3945d6e93fb
|
tutorial_tests = """
Let's try a simple generator:
>>> def f():
... yield 1
... yield 2
>>> for i in f():
... print(i)
1
2
>>> g = f()
>>> next(g)
1
>>> next(g)
2
"Falling off the end" stops the generator:
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
StopIteration
"return" also stops the generator:
>>> def f():
... yield 1
... return
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 3, in f
StopIteration
>>> next(g) # once stopped, can't be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
"raise StopIteration" stops the generator too:
>>> def f():
... yield 1
... raise StopIteration
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
However, they are not exactly equivalent:
>>> def g1():
... try:
... return
... except:
... yield 1
...
>>> list(g1())
[]
>>> def g2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(g2()))
[42]
This may be surprising at first:
>>> def g3():
... try:
... return
... finally:
... yield 1
...
>>> list(g3())
[1]
Let's create an alternate range() function implemented as a generator:
>>> def yrange(n):
... for i in range(n):
... yield i
...
>>> list(yrange(5))
[0, 1, 2, 3, 4]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print("creator", next(r))
... return r
...
>>> def caller():
... r = creator()
... for i in r:
... print("caller", i)
...
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
...
>>> list(zrange(5))
[0, 1, 2, 3, 4]
"""
# The examples from PEP 255.
pep_tests = """
Specification: Yield
Restriction: A generator cannot be resumed while it is actively
running:
>>> def g():
... i = next(me)
... yield i
>>> me = g()
>>> next(me)
Traceback (most recent call last):
...
File "<string>", line 2, in g
ValueError: generator already executing
Specification: Return
Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example,
>>> def f1():
... try:
... return
... except:
... yield 1
>>> print(list(f1()))
[]
because, as in any function, return simply exits, but
>>> def f2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(f2()))
[42]
because StopIteration is captured by a bare "except", as is any
exception.
Specification: Generators and Exception Propagation
>>> def f():
... return 1//0
>>> def g():
... yield f() # the zero division exception propagates
... yield 42 # and we'll never get here
>>> k = g()
>>> next(k)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
File "<stdin>", line 2, in f
ZeroDivisionError: integer division or modulo by zero
>>> next(k) # and the generator cannot be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>>
Specification: Try/Except/Finally
>>> def f():
... try:
... yield 1
... try:
... yield 2
... 1//0
... yield 3 # never get here
... except ZeroDivisionError:
... yield 4
... yield 5
... raise
... except:
... yield 6
... yield 7 # the "raise" above stops this
... except:
... yield 8
... yield 9
... try:
... x = 12
... finally:
... yield 10
... yield 11
>>> print(list(f()))
[1, 2, 4, 5, 8, 9, 10, 11]
>>>
Guido's binary tree example.
>>> # A binary tree class.
>>> class Tree:
...
... def __init__(self, label, left=None, right=None):
... self.label = label
... self.left = left
... self.right = right
...
... def __repr__(self, level=0, indent=" "):
... s = level*indent + repr(self.label)
... if self.left:
... s = s + "\\n" + self.left.__repr__(level+1, indent)
... if self.right:
... s = s + "\\n" + self.right.__repr__(level+1, indent)
... return s
...
... def __iter__(self):
... return inorder(self)
>>> # Create a Tree from a list.
>>> def tree(list):
... n = len(list)
... if n == 0:
... return []
... i = n // 2
... return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # A recursive generator that generates Tree labels in in-order.
>>> def inorder(t):
... if t:
... for x in inorder(t.left):
... yield x
... yield t.label
... for x in inorder(t.right):
... yield x
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # Print the nodes of the tree in in-order.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
>>> # A non-recursive generator.
>>> def inorder(node):
... stack = []
... while node:
... while node.left:
... stack.append(node)
... node = node.left
... yield node.label
... while not node.right:
... try:
... node = stack.pop()
... except IndexError:
... return
... yield node.label
... node = node.right
>>> # Exercise the non-recursive generator.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
"""
# Examples from Iterator-List and Python-Dev and c.l.py.
email_tests = """
The difference between yielding None and returning it.
>>> def g():
... for i in range(3):
... yield None
... yield None
... return
>>> list(g())
[None, None, None, None]
Ensure that explicitly raising StopIteration acts like any other exception
in try/except, not like a return.
>>> def g():
... yield 1
... try:
... raise StopIteration
... except:
... yield 2
... yield 3
>>> list(g())
[1, 2, 3]
Next one was posted to c.l.py.
>>> def gcomb(x, k):
... "Generate all combinations of k elements from list x."
...
... if k > len(x):
... return
... if k == 0:
... yield []
... else:
... first, rest = x[0], x[1:]
... # A combination does or doesn't contain first.
... # If it does, the remainder is a k-1 comb of rest.
... for c in gcomb(rest, k-1):
... c.insert(0, first)
... yield c
... # If it doesn't contain first, it's a k comb of rest.
... for c in gcomb(rest, k):
... yield c
>>> seq = list(range(1, 5))
>>> for k in range(len(seq) + 2):
... print("%d-combs of %s:" % (k, seq))
... for c in gcomb(seq, k):
... print(" ", c)
0-combs of [1, 2, 3, 4]:
[]
1-combs of [1, 2, 3, 4]:
[1]
[2]
[3]
[4]
2-combs of [1, 2, 3, 4]:
[1, 2]
[1, 3]
[1, 4]
[2, 3]
[2, 4]
[3, 4]
3-combs of [1, 2, 3, 4]:
[1, 2, 3]
[1, 2, 4]
[1, 3, 4]
[2, 3, 4]
4-combs of [1, 2, 3, 4]:
[1, 2, 3, 4]
5-combs of [1, 2, 3, 4]:
From the Iterators list, about the types of these things.
>>> def g():
... yield 1
...
>>> type(g)
<class 'function'>
>>> i = g()
>>> type(i)
<class 'generator'>
>>> [s for s in dir(i) if not s.startswith('_')]
['close', 'gi_code', 'gi_frame', 'gi_running', 'send', 'throw']
>>> from test.support import HAVE_DOCSTRINGS
>>> print(i.__next__.__doc__ if HAVE_DOCSTRINGS else 'x.__next__() <==> next(x)')
x.__next__() <==> next(x)
>>> iter(i) is i
True
>>> import types
>>> isinstance(i, types.GeneratorType)
True
And more, added later.
>>> i.gi_running
0
>>> type(i.gi_frame)
<class 'frame'>
>>> i.gi_running = 42
Traceback (most recent call last):
...
AttributeError: readonly attribute
>>> def g():
... yield me.gi_running
>>> me = g()
>>> me.gi_running
0
>>> next(me)
1
>>> me.gi_running
0
A clever union-find implementation from c.l.py, due to David Eppstein.
Sent: Friday, June 29, 2001 12:16 PM
To: python-list@python.org
Subject: Re: PEP 255: Simple Generators
>>> class disjointSet:
... def __init__(self, name):
... self.name = name
... self.parent = None
... self.generator = self.generate()
...
... def generate(self):
... while not self.parent:
... yield self
... for x in self.parent.generator:
... yield x
...
... def find(self):
... return next(self.generator)
...
... def union(self, parent):
... if self.parent:
... raise ValueError("Sorry, I'm not a root!")
... self.parent = parent
...
... def __str__(self):
... return self.name
>>> names = "ABCDEFGHIJKLM"
>>> sets = [disjointSet(name) for name in names]
>>> roots = sets[:]
>>> import random
>>> gen = random.Random(42)
>>> while 1:
... for s in sets:
... print(" %s->%s" % (s, s.find()), end='')
... print()
... if len(roots) > 1:
... s1 = gen.choice(roots)
... roots.remove(s1)
... s2 = gen.choice(roots)
... s1.union(s2)
... print("merged", s1, "into", s2)
... else:
... break
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged K into B
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M
merged A into F
A->F B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M
merged E into F
A->F B->B C->C D->D E->F F->F G->G H->H I->I J->J K->B L->L M->M
merged D into C
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->M
merged M into C
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->C
merged J into B
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->B K->B L->L M->C
merged B into C
A->F B->C C->C D->C E->F F->F G->G H->H I->I J->C K->C L->L M->C
merged F into G
A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->L M->C
merged L into C
A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->C M->C
merged G into I
A->I B->C C->C D->C E->I F->I G->I H->H I->I J->C K->C L->C M->C
merged I into H
A->H B->C C->C D->C E->H F->H G->H H->H I->H J->C K->C L->C M->C
merged C into H
A->H B->H C->H D->H E->H F->H G->H H->H I->H J->H K->H L->H M->H
"""
# Emacs turd '
# Fun tests (for sufficiently warped notions of "fun").
fun_tests = """
Build up to a recursive Sieve of Eratosthenes generator.
>>> def firstn(g, n):
... return [next(g) for i in range(n)]
>>> def intsfrom(i):
... while 1:
... yield i
... i += 1
>>> firstn(intsfrom(5), 7)
[5, 6, 7, 8, 9, 10, 11]
>>> def exclude_multiples(n, ints):
... for i in ints:
... if i % n:
... yield i
>>> firstn(exclude_multiples(3, intsfrom(1)), 6)
[1, 2, 4, 5, 7, 8]
>>> def sieve(ints):
... prime = next(ints)
... yield prime
... not_divisible_by_prime = exclude_multiples(prime, ints)
... for p in sieve(not_divisible_by_prime):
... yield p
>>> primes = sieve(intsfrom(2))
>>> firstn(primes, 20)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71]
Another famous problem: generate all integers of the form
2**i * 3**j * 5**k
in increasing order, where i,j,k >= 0. Trickier than it may look at first!
Try writing it without generators, and correctly, and without generating
3 internal results for each result output.
>>> def times(n, g):
... for i in g:
... yield n * i
>>> firstn(times(10, intsfrom(1)), 10)
[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
>>> def merge(g, h):
... ng = next(g)
... nh = next(h)
... while 1:
... if ng < nh:
... yield ng
... ng = next(g)
... elif ng > nh:
... yield nh
... nh = next(h)
... else:
... yield ng
... ng = next(g)
... nh = next(h)
The following works, but is doing a whale of a lot of redundant work --
it's not clear how to get the internal uses of m235 to share a single
generator. Note that me_times2 (etc) each need to see every element in the
result sequence. So this is an example where lazy lists are more natural
(you can look at the head of a lazy list any number of times).
>>> def m235():
... yield 1
... me_times2 = times(2, m235())
... me_times3 = times(3, m235())
... me_times5 = times(5, m235())
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Don't print "too many" of these -- the implementation above is extremely
inefficient: each call of m235() leads to 3 recursive calls, and in
turn each of those 3 more, and so on, and so on, until we've descended
enough levels to satisfy the print stmts. Very odd: when I printed 5
lines of results below, this managed to screw up Win98's malloc in "the
usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting
address space, and it *looked* like a very slow leak.
>>> result = m235()
>>> for i in range(3):
... print(firstn(result, 15))
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
Heh. Here's one way to get a shared list, complete with an excruciating
namespace renaming trick. The *pretty* part is that the times() and merge()
functions can be reused as-is, because they only assume their stream
arguments are iterable -- a LazyList is the same as a generator to times().
>>> class LazyList:
... def __init__(self, g):
... self.sofar = []
... self.fetch = g.__next__
...
... def __getitem__(self, i):
... sofar, fetch = self.sofar, self.fetch
... while i >= len(sofar):
... sofar.append(fetch())
... return sofar[i]
>>> def m235():
... yield 1
... # Gack: m235 below actually refers to a LazyList.
... me_times2 = times(2, m235)
... me_times3 = times(3, m235)
... me_times5 = times(5, m235)
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Print as many of these as you like -- *this* implementation is memory-
efficient.
>>> m235 = LazyList(m235())
>>> for i in range(5):
... print([m235[j] for j in range(15*i, 15*(i+1))])
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
Ye olde Fibonacci generator, LazyList style.
>>> def fibgen(a, b):
...
... def sum(g, h):
... while 1:
... yield next(g) + next(h)
...
... def tail(g):
... next(g) # throw first away
... for x in g:
... yield x
...
... yield a
... yield b
... for s in sum(iter(fib),
... tail(iter(fib))):
... yield s
>>> fib = LazyList(fibgen(1, 2))
>>> firstn(iter(fib), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
Running after your tail with itertools.tee (new in version 2.4)
The algorithms "m235" (Hamming) and Fibonacci presented above are both
examples of a whole family of FP (functional programming) algorithms
where a function produces and returns a list while the production algorithm
assumes the list has already been produced by recursively calling itself.
For these algorithms to work, they must:
- produce at least a first element without presupposing the existence of
the rest of the list
- produce their elements in a lazy manner
To work efficiently, the beginning of the list must not be recomputed over
and over again. This is ensured in most FP languages as a built-in feature.
In Python, we have to explicitly maintain a list of already computed results
and abandon genuine recursion.
This is what had been attempted above with the LazyList class. One problem
with that class is that it keeps a list of all of the generated results and
therefore continually grows. This partially defeats the goal of the generator
concept, viz. produce the results only as needed instead of producing them
all and thereby wasting memory.
Thanks to itertools.tee, it is now clear "how to get the internal uses of
m235 to share a single generator".
>>> from itertools import tee
>>> def m235():
... def _m235():
... yield 1
... for n in merge(times(2, m2),
... merge(times(3, m3),
... times(5, m5))):
... yield n
... m1 = _m235()
... m2, m3, m5, mRes = tee(m1, 4)
... return mRes
>>> it = m235()
>>> for i in range(5):
... print(firstn(it, 15))
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
The "tee" function does just what we want. It internally keeps a generated
result for as long as it has not been "consumed" from all of the duplicated
iterators, whereupon it is deleted. You can therefore print the Hamming
sequence for hours with little or no increase in memory usage.
The beauty of it is that recursive running-after-their-tail FP algorithms
are quite straightforwardly expressed with this Python idiom.
Ye olde Fibonacci generator, tee style.
>>> def fib():
...
... def _isum(g, h):
... while 1:
... yield next(g) + next(h)
...
... def _fib():
... yield 1
... yield 2
... next(fibTail) # throw first away
... for res in _isum(fibHead, fibTail):
... yield res
...
... realfib = _fib()
... fibHead, fibTail, fibRes = tee(realfib, 3)
... return fibRes
>>> firstn(fib(), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
"""
# syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0
# hackery.
syntax_tests = """
These are fine:
>>> def f():
... yield 1
... return
>>> def f():
... try:
... yield 1
... finally:
... pass
>>> def f():
... try:
... try:
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... pass
... finally:
... pass
>>> def f():
... try:
... try:
... yield 12
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... try:
... x = 12
... finally:
... yield 12
... except:
... return
>>> list(f())
[12, 666]
>>> def f():
... yield
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... yield
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... yield 1
>>> type(f())
<class 'generator'>
>>> def f():
... if "":
... yield None
>>> type(f())
<class 'generator'>
>>> def f():
... return
... try:
... if x==4:
... pass
... elif 0:
... try:
... 1//0
... except SyntaxError:
... pass
... else:
... if 0:
... while 12:
... x += 1
... yield 2 # don't blink
... f(a, b, c, d, e)
... else:
... pass
... except:
... x = 1
... return
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... def g():
... yield 1
...
>>> type(f())
<class 'NoneType'>
>>> def f():
... if 0:
... class C:
... def __init__(self):
... yield 1
... def f(self):
... yield 2
>>> type(f())
<class 'NoneType'>
>>> def f():
... if 0:
... return
... if 0:
... yield 2
>>> type(f())
<class 'generator'>
This one caused a crash (see SF bug 567538):
>>> def f():
... for i in range(3):
... try:
... continue
... finally:
... yield i
...
>>> g = f()
>>> print(next(g))
0
>>> print(next(g))
1
>>> print(next(g))
2
>>> print(next(g))
Traceback (most recent call last):
StopIteration
Test the gi_code attribute
>>> def f():
... yield 5
...
>>> g = f()
>>> g.gi_code is f.__code__
True
>>> next(g)
5
>>> next(g)
Traceback (most recent call last):
StopIteration
>>> g.gi_code is f.__code__
True
Test the __name__ attribute and the repr()
>>> def f():
... yield 5
...
>>> g = f()
>>> g.__name__
'f'
>>> repr(g) # doctest: +ELLIPSIS
'<generator object f at ...>'
Lambdas shouldn't have their usual return behavior.
>>> x = lambda: (yield 1)
>>> list(x())
[1]
>>> x = lambda: ((yield 1), (yield 2))
>>> list(x())
[1, 2]
"""
# conjoin is a simple backtracking generator, named in honor of Icon's
# "conjunction" control structure. Pass a list of no-argument functions
# that return iterable objects. Easiest to explain by example: assume the
# function list [x, y, z] is passed. Then conjoin acts like:
#
# def g():
# values = [None] * 3
# for values[0] in x():
# for values[1] in y():
# for values[2] in z():
# yield values
#
# So some 3-lists of values *may* be generated, each time we successfully
# get into the innermost loop. If an iterator fails (is exhausted) before
# then, it "backtracks" to get the next value from the nearest enclosing
# iterator (the one "to the left"), and starts all over again at the next
# slot (pumps a fresh iterator). Of course this is most useful when the
# iterators have side-effects, so that which values *can* be generated at
# each slot depend on the values iterated at previous slots.
def simple_conjoin(gs):
values = [None] * len(gs)
def gen(i):
if i >= len(gs):
yield values
else:
for values[i] in gs[i]():
for x in gen(i+1):
yield x
for x in gen(0):
yield x
# That works fine, but recursing a level and checking i against len(gs) for
# each item produced is inefficient. By doing manual loop unrolling across
# generator boundaries, it's possible to eliminate most of that overhead.
# This isn't worth the bother *in general* for generators, but conjoin() is
# a core building block for some CPU-intensive generator applications.
def conjoin(gs):
n = len(gs)
values = [None] * n
# Do one loop nest at time recursively, until the # of loop nests
# remaining is divisible by 3.
def gen(i):
if i >= n:
yield values
elif (n-i) % 3:
ip1 = i+1
for values[i] in gs[i]():
for x in gen(ip1):
yield x
else:
for x in _gen3(i):
yield x
# Do three loop nests at a time, recursing only if at least three more
# remain. Don't call directly: this is an internal optimization for
# gen's use.
def _gen3(i):
assert i < n and (n-i) % 3 == 0
ip1, ip2, ip3 = i+1, i+2, i+3
g, g1, g2 = gs[i : ip3]
if ip3 >= n:
# These are the last three, so we can yield values directly.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
yield values
else:
# At least 6 loop nests remain; peel off 3 and recurse for the
# rest.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
for x in _gen3(ip3):
yield x
for x in gen(0):
yield x
# And one more approach: For backtracking apps like the Knight's Tour
# solver below, the number of backtracking levels can be enormous (one
# level per square, for the Knight's Tour, so that e.g. a 100x100 board
# needs 10,000 levels). In such cases Python is likely to run out of
# stack space due to recursion. So here's a recursion-free version of
# conjoin too.
# NOTE WELL: This allows large problems to be solved with only trivial
# demands on stack space. Without explicitly resumable generators, this is
# much harder to achieve. OTOH, this is much slower (up to a factor of 2)
# than the fancy unrolled recursive conjoin.
def flat_conjoin(gs): # rename to conjoin to run tests with this instead
n = len(gs)
values = [None] * n
iters = [None] * n
_StopIteration = StopIteration # make local because caught a *lot*
i = 0
while 1:
# Descend.
try:
while i < n:
it = iters[i] = gs[i]().__next__
values[i] = it()
i += 1
except _StopIteration:
pass
else:
assert i == n
yield values
# Backtrack until an older iterator can be resumed.
i -= 1
while i >= 0:
try:
values[i] = iters[i]()
# Success! Start fresh at next level.
i += 1
break
except _StopIteration:
# Continue backtracking.
i -= 1
else:
assert i < 0
break
# A conjoin-based N-Queens solver.
class Queens:
def __init__(self, n):
self.n = n
rangen = range(n)
# Assign a unique int to each column and diagonal.
# columns: n of those, range(n).
# NW-SE diagonals: 2n-1 of these, i-j unique and invariant along
# each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0-
# based.
# NE-SW diagonals: 2n-1 of these, i+j unique and invariant along
# each, smallest i+j is 0, largest is 2n-2.
# For each square, compute a bit vector of the columns and
# diagonals it covers, and for each row compute a function that
# generates the possibilities for the columns in that row.
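# Illustrative example (added note, not in the original): for n=8 the square
# at row i=2, column j=5 sets column bit 5, NW-SE bit 8 + (2-5) + 7 = 12,
# and NE-SW bit 8 + 15 + (2+5) = 30, i.e.
# uses == (1 << 5) | (1 << 12) | (1 << 30).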
self.rowgenerators = []
for i in rangen:
rowuses = [(1 << j) | # column ordinal
(1 << (n + i-j + n-1)) | # NW-SE ordinal
(1 << (n + 2*n-1 + i+j)) # NE-SW ordinal
for j in rangen]
def rowgen(rowuses=rowuses):
for j in rangen:
uses = rowuses[j]
if uses & self.used == 0:
self.used |= uses
yield j
self.used &= ~uses
self.rowgenerators.append(rowgen)
# Generate solutions.
def solve(self):
self.used = 0
for row2col in conjoin(self.rowgenerators):
yield row2col
def printsolution(self, row2col):
n = self.n
assert n == len(row2col)
sep = "+" + "-+" * n
print(sep)
for i in range(n):
squares = [" " for j in range(n)]
squares[row2col[i]] = "Q"
print("|" + "|".join(squares) + "|")
print(sep)
# A conjoin-based Knight's Tour solver. This is pretty sophisticated
# (e.g., when used with flat_conjoin above, and passing hard=1 to the
# constructor, a 200x200 Knight's Tour was found quickly -- note that we're
# creating 10s of thousands of generators then!), and is lengthy.
class Knights:
def __init__(self, m, n, hard=0):
self.m, self.n = m, n
# solve() will set up succs[i] to be a list of square #i's
# successors.
succs = self.succs = []
# Remove i0 from each of its successor's successor lists, i.e.
# successors can't go back to i0 again. Return 0 if we can
# detect this makes a solution impossible, else return 1.
def remove_from_successors(i0, len=len):
# If we remove all exits from a free square, we're dead:
# even if we move to it next, we can't leave it again.
# If we create a square with one exit, we must visit it next;
# else somebody else will have to visit it, and since there's
# only one adjacent, there won't be a way to leave it again.
# Finally, if we create more than one free square with a
# single exit, we can only move to one of them next, leaving
# the other one a dead end.
ne0 = ne1 = 0
for i in succs[i0]:
s = succs[i]
s.remove(i0)
e = len(s)
if e == 0:
ne0 += 1
elif e == 1:
ne1 += 1
return ne0 == 0 and ne1 < 2
# Put i0 back in each of its successor's successor lists.
def add_to_successors(i0):
for i in succs[i0]:
succs[i].append(i0)
# Generate the first move.
def first():
if m < 1 or n < 1:
return
# Since we're looking for a cycle, it doesn't matter where we
# start. Starting in a corner makes the 2nd move easy.
corner = self.coords2index(0, 0)
remove_from_successors(corner)
self.lastij = corner
yield corner
add_to_successors(corner)
# Generate the second moves.
def second():
corner = self.coords2index(0, 0)
assert self.lastij == corner # i.e., we started in the corner
if m < 3 or n < 3:
return
assert len(succs[corner]) == 2
assert self.coords2index(1, 2) in succs[corner]
assert self.coords2index(2, 1) in succs[corner]
# Only two choices. Whichever we pick, the other must be the
# square picked on move m*n, as it's the only way to get back
# to (0, 0). Save its index in self.final so that moves before
# the last know it must be kept free.
for i, j in (1, 2), (2, 1):
this = self.coords2index(i, j)
final = self.coords2index(3-i, 3-j)
self.final = final
remove_from_successors(this)
succs[final].append(corner)
self.lastij = this
yield this
succs[final].remove(corner)
add_to_successors(this)
# Generate moves 3 thru m*n-1.
def advance(len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, i)]
break
candidates.append((e, i))
else:
candidates.sort()
for e, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate moves 3 thru m*n-1. Alternative version using a
# stronger (but more expensive) heuristic to order successors.
# Since the # of backtracking levels is m*n, a poor move early on
# can take eons to undo. Smallest square board for which this
# matters a lot is 52x52.
def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
# Break ties via max distance from board centerpoint (favor
# corners and edges whenever possible).
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, 0, i)]
break
i1, j1 = self.index2coords(i)
d = (i1 - vmid)**2 + (j1 - hmid)**2
candidates.append((e, -d, i))
else:
candidates.sort()
for e, d, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate the last move.
def last():
assert self.final in succs[self.lastij]
yield self.final
if m*n < 4:
self.squaregenerators = [first]
else:
self.squaregenerators = [first, second] + \
[hard and advance_hard or advance] * (m*n - 3) + \
[last]
def coords2index(self, i, j):
assert 0 <= i < self.m
assert 0 <= j < self.n
return i * self.n + j
def index2coords(self, index):
assert 0 <= index < self.m * self.n
return divmod(index, self.n)
def _init_board(self):
succs = self.succs
del succs[:]
m, n = self.m, self.n
c2i = self.coords2index
offsets = [( 1, 2), ( 2, 1), ( 2, -1), ( 1, -2),
(-1, -2), (-2, -1), (-2, 1), (-1, 2)]
rangen = range(n)
for i in range(m):
for j in rangen:
s = [c2i(i+io, j+jo) for io, jo in offsets
if 0 <= i+io < m and
0 <= j+jo < n]
succs.append(s)
# Generate solutions.
def solve(self):
self._init_board()
for x in conjoin(self.squaregenerators):
yield x
def printsolution(self, x):
m, n = self.m, self.n
assert len(x) == m*n
w = len(str(m*n))
format = "%" + str(w) + "d"
squares = [[None] * n for i in range(m)]
k = 1
for i in x:
i1, j1 = self.index2coords(i)
squares[i1][j1] = format % k
k += 1
sep = "+" + ("-" * w + "+") * n
print(sep)
for i in range(m):
row = squares[i]
print("|" + "|".join(row) + "|")
print(sep)
conjoin_tests = """
Generate the 3-bit binary numbers in order. This illustrates dumbest-
possible use of conjoin, just to generate the full cross-product.
>>> for c in conjoin([lambda: iter((0, 1))] * 3):
... print(c)
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
[1, 1, 1]
For efficiency in typical backtracking apps, conjoin() yields the same list
object each time. So if you want to save away a full account of its
generated sequence, you need to copy its results.
>>> def gencopy(iterator):
... for x in iterator:
... yield x[:]
>>> for n in range(10):
... all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
... print(n, len(all), all[0] == [0] * n, all[-1] == [1] * n)
0 1 True True
1 2 True True
2 4 True True
3 8 True True
4 16 True True
5 32 True True
6 64 True True
7 128 True True
8 256 True True
9 512 True True
And run an 8-queens solver.
>>> q = Queens(8)
>>> LIMIT = 2
>>> count = 0
>>> for row2col in q.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... q.printsolution(row2col)
Solution 1
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
Solution 2
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
>>> print(count, "solutions in all.")
92 solutions in all.
And run a Knight's Tour on a 10x10 board. Note that there are about
20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.
>>> k = Knights(10, 10)
>>> LIMIT = 2
>>> count = 0
>>> for x in k.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... k.printsolution(x)
... else:
... break
Solution 1
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
Solution 2
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
"""
weakref_tests = """\
Generators are weakly referencable:
>>> import weakref
>>> def gen():
... yield 'foo!'
...
>>> wr = weakref.ref(gen)
>>> wr() is gen
True
>>> p = weakref.proxy(gen)
Generator-iterators are weakly referencable as well:
>>> gi = gen()
>>> wr = weakref.ref(gi)
>>> wr() is gi
True
>>> p = weakref.proxy(gi)
>>> list(p)
['foo!']
"""
coroutine_tests = """\
>>> from test.support import gc_collect
Sending a value into a started generator:
>>> def f():
... print((yield 1))
... yield 2
>>> g = f()
>>> next(g)
1
>>> g.send(42)
42
2
Sending a value into a new generator produces a TypeError:
>>> f().send("foo")
Traceback (most recent call last):
...
TypeError: can't send non-None value to a just-started generator
Yield by itself yields None:
>>> def f(): yield
>>> list(f())
[None]
An obscene abuse of a yield expression within a generator expression:
>>> list((yield 21) for i in range(4))
[21, None, 21, None, 21, None, 21, None]
And a more sane, but still weird usage:
>>> def f(): list(i for i in [(yield 26)])
>>> type(f())
<class 'generator'>
A yield expression with augmented assignment.
>>> def coroutine(seq):
... count = 0
... while count < 200:
... count += yield
... seq.append(count)
>>> seq = []
>>> c = coroutine(seq)
>>> next(c)
>>> print(seq)
[]
>>> c.send(10)
>>> print(seq)
[10]
>>> c.send(10)
>>> print(seq)
[10, 20]
>>> c.send(10)
>>> print(seq)
[10, 20, 30]
Check some syntax errors for yield expressions:
>>> f=lambda: (yield 1),(yield 2)
Traceback (most recent call last):
...
SyntaxError: 'yield' outside function
>>> def f(): x = yield = y
Traceback (most recent call last):
...
SyntaxError: assignment to yield expression not possible
>>> def f(): (yield bar) = y
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression
>>> def f(): (yield bar) += y
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression
Now check some throw() conditions:
>>> def f():
... while True:
... try:
... print((yield))
... except ValueError as v:
... print("caught ValueError (%s)" % (v))
>>> import sys
>>> g = f()
>>> next(g)
>>> g.throw(ValueError) # type only
caught ValueError ()
>>> g.throw(ValueError("xyz")) # value only
caught ValueError (xyz)
>>> g.throw(ValueError, ValueError(1)) # value+matching type
caught ValueError (1)
>>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped
caught ValueError (1)
>>> g.throw(ValueError, ValueError(1), None) # explicit None traceback
caught ValueError (1)
>>> g.throw(ValueError(1), "foo") # bad args
Traceback (most recent call last):
...
TypeError: instance exception may not have a separate value
>>> g.throw(ValueError, "foo", 23) # bad args
Traceback (most recent call last):
...
TypeError: throw() third argument must be a traceback object
>>> g.throw("abc")
Traceback (most recent call last):
...
TypeError: exceptions must derive from BaseException, not str
>>> g.throw(0)
Traceback (most recent call last):
...
TypeError: exceptions must derive from BaseException, not int
>>> g.throw(list)
Traceback (most recent call last):
...
TypeError: exceptions must derive from BaseException, not type
>>> def throw(g,exc):
... try:
... raise exc
... except:
... g.throw(*sys.exc_info())
>>> throw(g,ValueError) # do it with traceback included
caught ValueError ()
>>> g.send(1)
1
>>> throw(g,TypeError) # terminate the generator
Traceback (most recent call last):
...
TypeError
>>> print(g.gi_frame)
None
>>> g.send(2)
Traceback (most recent call last):
...
StopIteration
>>> g.throw(ValueError,6) # throw on closed generator
Traceback (most recent call last):
...
ValueError: 6
>>> f().throw(ValueError,7) # throw on just-opened generator
Traceback (most recent call last):
...
ValueError: 7
Plain "raise" inside a generator should preserve the traceback (#13188).
The traceback should have 3 levels:
- g.throw()
- f()
- 1/0
>>> def f():
... try:
... yield
... except:
... raise
>>> g = f()
>>> try:
... 1/0
... except ZeroDivisionError as v:
... try:
... g.throw(v)
... except Exception as w:
... tb = w.__traceback__
>>> levels = 0
>>> while tb:
... levels += 1
... tb = tb.tb_next
>>> levels
3
Now let's try closing a generator:
>>> def f():
... try: yield
... except GeneratorExit:
... print("exiting")
>>> g = f()
>>> next(g)
>>> g.close()
exiting
>>> g.close() # should be no-op now
>>> f().close() # close on just-opened generator should be fine
>>> def f(): yield # an even simpler generator
>>> f().close() # close before opening
>>> g = f()
>>> next(g)
>>> g.close() # close normally
And finalization:
>>> def f():
... try: yield
... finally:
... print("exiting")
>>> g = f()
>>> next(g)
>>> del g; gc_collect()
exiting
GeneratorExit is not caught by except Exception:
>>> def f():
... try: yield
... except Exception:
... print('except')
... finally:
... print('finally')
>>> g = f()
>>> next(g)
>>> del g; gc_collect()
finally
Now let's try some ill-behaved generators:
>>> def f():
... try: yield
... except GeneratorExit:
... yield "foo!"
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
RuntimeError: generator ignored GeneratorExit
>>> g.close()
Our ill-behaved code should be invoked during GC:
>>> import sys, io
>>> old, sys.stderr = sys.stderr, io.StringIO()
>>> g = f()
>>> next(g)
>>> del g
>>> gc_collect()
>>> sys.stderr.getvalue().startswith(
... "Exception RuntimeError: 'generator ignored GeneratorExit' in "
... )
True
>>> sys.stderr = old
And errors thrown during closing should propagate:
>>> def f():
... try: yield
... except GeneratorExit:
... raise TypeError("fie!")
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
TypeError: fie!
Ensure that various yield expression constructs make their
enclosing function a generator:
>>> def f(): x += yield
>>> type(f())
<class 'generator'>
>>> def f(): x = yield
>>> type(f())
<class 'generator'>
>>> def f(): lambda x=(yield): 1
>>> type(f())
<class 'generator'>
>>> def f(): x=(i for i in (yield) if (yield))
>>> type(f())
<class 'generator'>
>>> def f(d): d[(yield "a")] = d[(yield "b")] = 27
>>> data = [1,2]
>>> g = f(data)
>>> type(g)
<class 'generator'>
>>> g.send(None)
'a'
>>> data
[1, 2]
>>> g.send(0)
'b'
>>> data
[27, 2]
>>> try: g.send(1)
... except StopIteration: pass
>>> data
[27, 27]
"""
refleaks_tests = """
Prior to adding cycle-GC support to itertools.tee, this code would leak
references. We add it to the standard suite so the routine refleak-tests
would trigger if it starts being uncleanable again.
>>> from test.support import gc_collect
>>> import itertools
>>> def leak():
... class gen:
... def __iter__(self):
... return self
... def __next__(self):
... return self.item
... g = gen()
... head, tail = itertools.tee(g)
... g.item = head
... return head
>>> it = leak()
Make sure to also test the involvement of the tee-internal teedataobject,
which stores returned items.
>>> item = next(it)
This test leaked at one point due to generator finalization/destruction.
It was copied from Lib/test/leakers/test_generator_cycle.py before the file
was removed.
>>> def leak():
... def gen():
... while True:
... yield g
... g = gen()
>>> leak()
This test isn't really generator related, but rather exception-in-cleanup
related. The coroutine tests (above) just happen to cause an exception in
the generator's __del__ (tp_del) method. We can also test for this
explicitly, without generators. We do have to redirect stderr to avoid
printing warnings and to double-check that we actually tested what we wanted
to test.
>>> import sys, io
>>> old = sys.stderr
>>> try:
... sys.stderr = io.StringIO()
... class Leaker:
... def __del__(self):
... raise RuntimeError
...
... l = Leaker()
... del l
... gc_collect()
... err = sys.stderr.getvalue().strip()
... err.startswith(
... "Exception RuntimeError: RuntimeError() in "
... )
... err.endswith("> ignored")
... len(err.splitlines())
... finally:
... sys.stderr = old
True
True
1
These refleak tests should perhaps be in a testfile of their own,
test_generators just happened to be the test that drew these out.
"""
__test__ = {"tut": tutorial_tests,
"pep": pep_tests,
"email": email_tests,
"fun": fun_tests,
"syntax": syntax_tests,
"conjoin": conjoin_tests,
"weakref": weakref_tests,
"coroutine": coroutine_tests,
"refleaks": refleaks_tests,
}
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
from test import support, test_generators
support.run_doctest(test_generators, verbose)
# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
test_main(1)
|
timm/timmnix
|
pypy3-v5.5.0-linux64/lib-python/3/test/test_generators.py
|
Python
|
mit
| 49,731
|
[
"VisIt"
] |
d7b49bcee7bdd5509440b1f53a3580e2eed869cc3818e262b6cd4f7263701295
|
import os
import sys
import random
import time
import math
import torchvision
from torch import optim
import io
from PIL import Image
import visdom
vis = visdom.Visdom()
import matplotlib.ticker as ticker
import socket
hostname = socket.gethostname()
from .pssm2go_model import *
from .baselines import *
from .consts import *
from pymongo import MongoClient
from tempfile import gettempdir
from shutil import copyfile
import pickle
import argparse
verbose = True
ckptpath = gettempdir()
SHOW_PLOT = False
USE_CUDA = False
USE_PRIOR = False
PAD_token = 0
SOS_token = 1
EOS_token = 2
MIN_LENGTH = 48
MAX_LENGTH = 480
MIN_COUNT = 2
t0 = datetime(2016, 2, 1, 0, 0)
t1 = datetime(2017, 2, 1, 0, 0)
class Lang(object):
def __init__(self, name):
self.name = name
self.trimmed = False
self.word2index = {}
self.word2count = {}
self.index2word = {0: "PAD", 1: "SOS", 2: "EOS"}
self.n_words = 3 # Count default tokens
def index_words(self, sequence):
for word in sequence:
self.index_word(word)
def index_word(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
# Remove words below a certain count threshold
def trim(self, min_count):
if self.trimmed: return
self.trimmed = True
keep_words = []
for k, v in self.word2count.items():
if v >= min_count:
keep_words.append(k)
print('keep_words %s / %s = %.4f' % (
len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
))
# Reinitialize dictionaries
self.word2index = {}
self.word2count = {}
self.index2word = {0: "PAD", 1: "SOS", 2: "EOS"}
self.n_words = 3 # Count default tokens
for word in keep_words:
self.index_word(word)
def _get_labeled_data(db, query, limit, pssm=True):
c = limit if limit else db.goa_uniprot.count(query)
s = db.goa_uniprot.find(query)
if limit: s = s.limit(limit)
seqid2goid, _ = GoAnnotationCollectionLoader(s, c, ASPECT).load()
query = {"_id": {"$in": unique(list(seqid2goid.keys())).tolist()}}
if pssm:
num_seq = db.pssm.count(query)
src_seq = db.pssm.find(query)
seqid2seq = PssmCollectionLoader(src_seq, num_seq).load()
else:
num_seq = db.uniprot.count(query)
src_seq = db.uniprot.find(query)
seqid2seq = UniprotCollectionLoader(src_seq, num_seq).load()
seqid2goid = {k: v for k, v in seqid2goid.items() if len(v) > 1 or 'GO:0005515' not in v}
seqid2goid = {k: v for k, v in seqid2goid.items() if k in seqid2seq.keys()}
return seqid2seq, seqid2goid
def load_training_and_validation(db, limit=None):
q_train = {'DB': 'UniProtKB',
'Evidence': {'$in': exp_codes},
'Date': {"$lte": t0},
'Aspect': ASPECT}
sequences_train, annotations_train = _get_labeled_data(db, q_train, limit)
q_valid = {'DB': 'UniProtKB',
'Evidence': {'$in': exp_codes},
'Date': {"$gt": t0, "$lte": t1},
'Aspect': ASPECT}
sequences_valid, annotations_valid = _get_labeled_data(db, q_valid, limit)
forbidden = set(sequences_train.keys())
sequences_valid = {k: v for k, v in sequences_valid.items() if k not in forbidden}
annotations_valid = {k: v for k, v in annotations_valid.items() if k not in forbidden}
return sequences_train, annotations_train, sequences_valid, annotations_valid
def filter_records(records_gen):
filtered_records = []
original_records = []
for _, inp, pssm, prior, out in records_gen:
original_records.append((inp, pssm, prior, out))
if MIN_LENGTH <= len(inp) <= MAX_LENGTH:
filtered_records.append((inp, pssm, prior, out))
return original_records, filtered_records
class DataGenerator(object):
def __init__(self, seqid2seqpssm, seqid2goid, blast2go=None, one_leaf=False):
self.seqid2seqpssm = seqid2seqpssm
self.seqid2goid = seqid2goid
self.blast2go = blast2go
self.one_leaf = one_leaf
def __iter__(self):
seqid2goid = self.seqid2goid
seqid2seqpssm = self.seqid2seqpssm
for seqid in sorted(seqid2goid.keys(), key=lambda k: len(seqid2seqpssm[k][0])):
seq, pssm, _ = seqid2seqpssm[seqid]
if len(pssm) != len(seq):
print("WARN: wrong PSSM! (%s)" % seqid)
continue
matrix = [AA.aa2onehot[aa] + [pssm[i][AA.index2aa[k]] for k in range(20)]
for i, aa in enumerate(seq)]
seq = [aa for _, aa in enumerate(seq)]
if self.blast2go:
prior = self.blast2go[seqid]
else:
prior = None
if self.one_leaf:
annots = []
for leaf in seqid2goid[seqid]:
anc = onto.propagate([leaf], include_root=False)
if len(anc) > len(annots):
annots = anc
else:
annots = onto.propagate(seqid2goid[seqid], include_root=False)
yield (seqid, seq, matrix, prior, annots)
def prepare_data(records_gen):
records1, records2 = filter_records(records_gen)
print("Filtered %d to %d records" % (len(records1), len(records2)))
print("Indexing words...")
for record in records2:
input_lang.index_words(record[0])
output_lang.index_words(record[3])
print('Indexed %d words in input language, %d words in output' % (input_lang.n_words, output_lang.n_words))
return records2
def trim_records(records):
keep_records, trimmed_records = [], []
for i, record in enumerate(records):
n = len(records)
if verbose:
sys.stdout.write("\r{0:.0f}%".format(100.0 * i / n))
input_seq, _, _, output_annots = record
keep_input = True
keep_output = True
for word in input_seq:
if word not in input_lang.word2index:
keep_input = False
break
for word in output_annots:
if word not in output_lang.word2index:
keep_output = False
break
# Remove if record doesn't match input and output conditions
if keep_input and keep_output:
keep_records.append(record)
else:
trimmed_records.append(record)
print("\nTrimmed from %d records to %d, %.4f of total" % (len(records), len(keep_records), len(keep_records) / len(records)))
return keep_records, trimmed_records
# Return a list of indexes, one for each word in the sequence, plus EOS
def indexes_from_sequence(lang, seq, eos=1):
if eos == 1:
return [lang.word2index[word] for word in seq] + [EOS_token]
else:
return [lang.word2index[word] for word in seq]
# Pad a PSSM with all-zero rows up to max_length
def pad_pssm(seq, max_length):
seq = [(seq[i] if i < len(seq) else ([0.] * 40)) for i in range(max_length)]
return seq
# Pad a sequence with the PAD token up to max_length
def pad_seq(seq, max_length):
seq += [PAD_token for _ in range(max_length - len(seq))]
return seq
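# Illustration (behaviour of the two helpers above): pad_seq([5, 7], 4)
# returns [5, 7, 0, 0], while pad_pssm pads with rows of 40 zeros so that
# every PSSM in a batch has the same length.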
def get_batch(batch_size, ix=None):
# Choose random records
if ix is None: ix = random.choice(list(range(len(trn_records) - batch_size)))
start, end = ix, min(ix + batch_size, len(trn_records))
sample = sorted([x for x in trn_records[start:end]], key=lambda x: -len(x[0]))
input_seqs = [indexes_from_sequence(input_lang, inp, eos=0) for (inp, _, _, _) in sample]
target_seqs = [indexes_from_sequence(output_lang, out) for (_, _, _, out) in sample]
input_pssms = [pssm for (_, pssm, _, _) in sample]
if USE_PRIOR:
blast_prior = [[prior[go] if go in prior else 0. for go in output_lang.word2index.keys()] for (_, _, prior, _) in sample]
prior_var = Variable(torch.FloatTensor(blast_prior))
else:
prior_var = None
# For input and target sequences, get array of lengths and pad with 0s to max length
input_lengths = [len(s) for s in input_seqs]
seq_padded = [pad_seq(s, max(input_lengths)) for s in input_seqs]
pssm_padded = [pad_pssm(s, max(input_lengths)) for s in input_pssms]
target_lengths = [len(s) for s in target_seqs]
target_padded = [pad_seq(s, max(target_lengths)) for s in target_seqs]
# Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
seq_var = Variable(torch.LongTensor(seq_padded)).transpose(0, 1)
pssm_var = Variable(torch.FloatTensor(pssm_padded)).transpose(0, 1)
target_var = Variable(torch.LongTensor(target_padded)).transpose(0, 1)
if USE_CUDA:
seq_var = seq_var.cuda()
pssm_var = pssm_var.cuda()
target_var = target_var.cuda()
if USE_CUDA and USE_PRIOR:
prior_var = prior_var.cuda()
return seq_var, pssm_var, input_lengths, target_var, target_lengths, prior_var
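# Shape sketch (added note): with batch size B and longest input length L,
# seq_var is an (L x B) LongTensor, pssm_var is an (L x B x 40) FloatTensor,
# and target_var is (T x B), where T is the longest padded target length.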
def test_models():
small_batch_size = 3
input_seqs, input_pssms, input_lengths, target_batches, target_lengths, input_prior = get_batch(small_batch_size)
print('input_batches', input_seqs.size()) # (max_len x batch_size)
print('target_batches', target_batches.size()) # (max_len x batch_size)
small_hidden_size = 8
small_n_layers = 2
encoder_test = EncoderRCNN(small_hidden_size, input_lang.n_words, small_n_layers)
if USE_PRIOR:
decoder_test = LuongAttnDecoderRNN('general', small_hidden_size, output_lang.n_words, small_n_layers, output_lang.n_words - 3)
else:
decoder_test = LuongAttnDecoderRNN('general', small_hidden_size, output_lang.n_words, small_n_layers)
if USE_CUDA:
encoder_test.cuda()
decoder_test.cuda()
encoder_outputs, encoder_hidden = encoder_test(input_seqs, input_pssms, input_lengths, None)
print('encoder_outputs', encoder_outputs.size()) # max_len x batch_size x hidden_size
print('encoder_hidden', encoder_hidden.size()) # n_layers * 2 x batch_size x hidden_size
max_target_length = max(target_lengths)
# Prepare decoder input and outputs
decoder_input = Variable(torch.LongTensor([SOS_token] * small_batch_size))
decoder_hidden = encoder_hidden[:decoder_test.n_layers] # Use last (forward) hidden state from encoder
all_decoder_outputs = Variable(torch.zeros(max_target_length, small_batch_size, decoder_test.output_size))
if USE_CUDA:
all_decoder_outputs = all_decoder_outputs.cuda()
decoder_input = decoder_input.cuda()
# Run through decoder one time step at a time
for t in range(max_target_length):
decoder_output, decoder_hidden, decoder_attn = decoder_test(
decoder_input, decoder_hidden, encoder_outputs, input_prior
)
all_decoder_outputs[t] = decoder_output # Store this step's outputs
decoder_input = target_batches[t] # Next input is current target
# Test masked cross entropy loss
loss = masked_cross_entropy(
all_decoder_outputs.transpose(0, 1).contiguous(),
target_batches.transpose(0, 1).contiguous(),
target_lengths
)
print('loss', loss.data[0])
def train(input_seqs, input_pssms, input_lengths, target_batches, target_lengths, input_prior,
encoder, decoder, encoder_optimizer, decoder_optimizer,
batch_size, grad_clip, gamma, teacher_forcing):
# Zero gradients of both optimizers
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
# Run words through encoder
encoder_outputs, encoder_hidden = encoder(input_seqs, input_pssms, input_lengths, None)
# Prepare input and output variables
decoder_input = Variable(torch.LongTensor([SOS_token] * batch_size))
decoder_hidden = encoder_hidden[:decoder.n_layers] # Use last (forward) hidden state from encoder
max_target_length = max(target_lengths)
all_decoder_outputs = Variable(torch.zeros(max_target_length, batch_size, decoder.output_size))
# Move new Variables to CUDA
if USE_CUDA:
decoder_input = decoder_input.cuda()
all_decoder_outputs = all_decoder_outputs.cuda()
# Run through decoder one time step at a time
for t in range(max_target_length):
decoder_output, decoder_hidden, decoder_attn = decoder(
decoder_input, decoder_hidden, encoder_outputs, input_prior
)
all_decoder_outputs[t] = decoder_output
if teacher_forcing == 1:
decoder_input = target_batches[t] # Next input is current target
else:
# Choose top word from output
_, topi = decoder_output.data.topk(1)
decoder_input = Variable(topi.squeeze(1))
# Loss calculation and backpropagation
loss = masked_cross_entropy(
all_decoder_outputs.transpose(0, 1).contiguous(), # -> batch x seq
target_batches.transpose(0, 1).contiguous(), # -> batch x seq
target_lengths, gamma=gamma
)
loss.backward()
# Clip gradient norms
ec = torch.nn.utils.clip_grad_norm(encoder.parameters(), grad_clip)
dc = torch.nn.utils.clip_grad_norm(decoder.parameters(), grad_clip)
# Update parameters with optimizers
encoder_optimizer.step()
decoder_optimizer.step()
return loss.data[0], ec, dc
def as_minutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def time_since(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (as_minutes(s), as_minutes(rs))
def evaluate(encoder, decoder, input_words, input_pssm, prior=None, max_length=MAX_LENGTH):
input_lengths = [len(input_words)]
input_seq = indexes_from_sequence(input_lang, input_words, eos=0)
input_seqs = Variable(torch.LongTensor([input_seq]), volatile=True).transpose(0, 1)
input_pssms = Variable(torch.FloatTensor([input_pssm]), volatile=True).transpose(0, 1)
if USE_PRIOR:
input_prior = [prior[go] if go in prior else 0. for go in output_lang.word2index.keys()]
input_prior = Variable(torch.FloatTensor([input_prior]))
else:
input_prior = None
if USE_CUDA:
input_seqs = input_seqs.cuda()
input_pssms = input_pssms.cuda()
if USE_CUDA and USE_PRIOR:
input_prior = input_prior.cuda()
# Set to not-training mode to disable dropout
encoder.train(False)
decoder.train(False)
# Run through encoder
encoder_outputs, encoder_hidden = encoder(input_seqs, input_pssms, input_lengths, None)
# Create starting vectors for decoder
decoder_input = Variable(torch.LongTensor([SOS_token]), volatile=True) # SOS
decoder_hidden = encoder_hidden[:decoder.n_layers] # Use last (forward) hidden state from encoder
if USE_CUDA:
decoder_input = decoder_input.cuda()
# Store output words and attention states
decoded_words = []
decoder_attentions = torch.zeros(max_length + 1, max_length + 1)
# Run through decoder
for di in range(max_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs, input_prior
)
decoder_attentions[di, :decoder_attention.size(2)] += decoder_attention.squeeze(0).squeeze(0).cpu().data
# Choose top word from output
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
if ni == EOS_token:
decoded_words.append('<EOS>')
break
else:
decoded_words.append(output_lang.index2word[ni])
# Next input is chosen word
decoder_input = Variable(torch.LongTensor([ni]))
if USE_CUDA: decoder_input = decoder_input.cuda()
# Set back to training mode
encoder.train(True)
decoder.train(True)
return decoded_words, decoder_attentions[:di + 1, :len(encoder_outputs)]
def evaluate_randomly(encoder, decoder):
[input_seq, input_pssm, prior, target_seq] = random.choice(tst_records)
evaluate_and_show_attention(encoder, decoder, input_seq, input_pssm, target_seq, prior)
def show_attention(input_sequence, output_words, attentions):
# Set up figure with colorbar
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(attentions.numpy(), cmap='bone')
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + input_sequence.split(' ') + ['<EOS>'], rotation=90)
ax.set_yticklabels([''] + output_words)
# Show label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
show_plot_visdom()
plt.show()
plt.close()
def show_plot_visdom():
buf = io.BytesIO()
plt.savefig(buf)
buf.seek(0)
attn_win = 'attention (%s)' % hostname
im = Image.open(buf).convert("RGB")
vis.image(torchvision.transforms.ToTensor()(im), win=attn_win, opts={'title': attn_win})
def evaluate_and_show_attention(encoder, decoder, input_words, input_pssm, target_words=None, input_prior=None):
output_words, attentions = evaluate(encoder, decoder, input_words, input_pssm, input_prior)
output_sequence = ' '.join(output_words)
input_sequence = ' '.join(input_words)
    target_sequence = ' '.join(target_words) if target_words is not None else None
print('>', input_sequence)
if target_sequence is not None:
print('=', target_sequence)
print('<', output_sequence)
if not SHOW_PLOT:
return
show_attention(input_sequence, output_words, attentions)
# Show input, target, output text in visdom
    win = 'evaluated (%s)' % hostname
text = '<p>> %s</p><p>= %s</p><p>< %s</p>' % (input_sequence, target_sequence, output_sequence)
vis.text(text, win=win, opts={'title': win})
def show_plot(points):
plt.figure()
fig, ax = plt.subplots()
loc = ticker.MultipleLocator(base=0.2) # put ticks at regular intervals
ax.yaxis.set_major_locator(loc)
plt.plot(points)
def add_arguments(parser):
parser.add_argument("--mongo_url", type=str, default='mongodb://localhost:27017/',
help="Supply the URL of MongoDB")
parser.add_argument("-a", "--aspect", type=str, choices=['F', 'P', 'C'],
required=True, help="Specify the ontology aspect.")
parser.add_argument("-o", "--out_dir", type=str, required=False,
default=gettempdir(), help="Specify the output directory.")
parser.add_argument("-m", "--model_name", type=str, required=False,
default="pssm2go", help="Specify the model name.")
parser.add_argument("-q", '--quiet', action='store_true', default=False,
help="Run in quiet mode.")
parser.add_argument('--blast2go', action='store_true', default=False,
help="Specify whether to use blast2go predictions.")
parser.add_argument('-r', '--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument("-d", "--device", type=str, default='cpu',
help="Specify what device you'd like to use e.g. 'cpu', 'gpu0' etc.")
parser.add_argument("-p", "--print_every", type=int, default=10,
help="How often should main_loop print training stats.")
parser.add_argument("-e", "--eval_every", type=int, default=100,
help="How often should main_loop evaluate the model.")
parser.add_argument("-l", "--max_length", type=int, default=200,
help="Max sequence length (both input and output).")
parser.add_argument("-c", "--min_count", type=int, default=2,
help="Minimal word count (both input and output).")
parser.add_argument("--num_cpu", type=int, default=4,
help="How many cpus for computing blast2go prior")
def save_checkpoint(state, is_best=False):
filename_late = os.path.join(ckptpath, "%s-%s-latest.tar"
% (args.model_name, GoAspect(args.aspect)))
torch.save(state, filename_late)
if is_best:
filename_best = os.path.join(ckptpath, "%s-%s-best.tar"
% (args.model_name, GoAspect(args.aspect)))
copyfile(filename_late, filename_best)
# https://github.com/pytorch/pytorch/issues/2830
def optimizer_cuda(optimizer):
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
def main_loop(
# Configure models
attn_model='general',
decoder_hidden_size=500,
encoder_hidden_size=500,
n_layers=2,
dropout=0.01,
batch_size=12,
# batch_size=50,
# Configure training/optimization
clip=50.0,
gamma=2.0,
teacher_forcing_ratio=0.8,
learning_rate=0.1,
decoder_learning_ratio=5.0,
n_epochs=500,
epoch=0,
print_every=20,
evaluate_every=1000
):
assert encoder_hidden_size == decoder_hidden_size
# Initialize models
encoder = EncoderRCNN(encoder_hidden_size, input_lang.n_words, n_layers, dropout=dropout)
if USE_PRIOR:
decoder = LuongAttnDecoderRNN(attn_model, decoder_hidden_size, output_lang.n_words, n_layers,
dropout=dropout, embedding=output_embedding, prior_size=output_lang.n_words - 3)
else:
decoder = LuongAttnDecoderRNN(attn_model, decoder_hidden_size, output_lang.n_words, n_layers,
dropout=dropout, embedding=output_embedding)
# Initialize optimizers and criterion
encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '%s'" % args.resume)
checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage)
epoch = checkpoint['epoch']
encoder.load_state_dict(checkpoint['encoder'])
decoder.load_state_dict(checkpoint['decoder'])
encoder_optimizer.load_state_dict(checkpoint['encoder_optimizer'])
decoder_optimizer.load_state_dict(checkpoint['decoder_optimizer'])
else:
print("=> no checkpoint found at '%s'" % args.resume)
# Move models to GPU
if USE_CUDA:
encoder.cuda()
decoder.cuda()
if USE_CUDA and args.resume:
optimizer_cuda(encoder_optimizer)
optimizer_cuda(decoder_optimizer)
# Keep track of time elapsed and running averages
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
while epoch < n_epochs:
epoch += 1
# Get training data for this cycle
i = 0
while i * batch_size < len(trn_records):
input_seqs, input_pssms, input_lengths, target_batches, target_lengths, input_prior = get_batch(batch_size)
# Run the train function
loss, ec, dc = train(
input_seqs, input_pssms, input_lengths, target_batches, target_lengths, input_prior,
encoder, decoder,
encoder_optimizer, decoder_optimizer,
batch_size, clip, gamma,
np.random.binomial(1, teacher_forcing_ratio)
)
# Keep track of loss
print_loss_total += loss
plot_loss_total += loss
if i % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
j = (i + 1) * batch_size
n = len(trn_records)
ratio = j / n
                print_summary = '%s [%d/%d] (%d/%d %d%%) %.4f' % (
                    time_since(start, ratio), epoch, n_epochs, j, n, ratio * 100, print_loss_avg)
print(print_summary)
if i % evaluate_every == 0:
evaluate_randomly(encoder, decoder)
save_checkpoint({
'epoch': epoch,
'encoder': encoder.state_dict(),
'decoder': decoder.state_dict(),
'encoder_optimizer': encoder_optimizer.state_dict(),
'decoder_optimizer': decoder_optimizer.state_dict()
})
i += 1
def set_output_lang(lang):
global output_lang
output_lang = lang
def set_input_lang(lang):
global input_lang
input_lang = lang
def set_ontology(ontology):
global onto
onto = ontology
def set_show_attn(val):
global SHOW_PLOT
SHOW_PLOT = val
def set_use_cuda(val):
global USE_CUDA
USE_CUDA = val
set_cuda(val)
def save_object(obj, filename):
with open(filename, 'wb') as output:
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
# Load and Prepare the data
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
set_use_cuda('gpu' in args.device)
MAX_LENGTH = args.max_length
MIN_COUNT = args.min_count
if USE_CUDA:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = args.device[-1]
verbose = not args.quiet
ckptpath = args.out_dir
client = MongoClient(args.mongo_url)
db = client['prot2vec']
onto = init_GO(args.aspect)
trn_seq2pssm, trn_seq2go, tst_seq2pssm, tst_seq2go = load_training_and_validation(db, limit=None)
if args.blast2go:
USE_PRIOR = True
pred_path = os.path.join(tmp_dir, 'pred-blast-%s.npy' % GoAspect(ASPECT))
if os.path.exists(pred_path):
blast2go = np.load(pred_path).item()
else:
targets = {k: v[0] for k, v in list(trn_seq2pssm.items()) + list(tst_seq2pssm.items())}
q = {'DB': 'UniProtKB', 'Evidence': {'$in': exp_codes}, 'Date': {"$lte": t0}, 'Aspect': ASPECT}
reference, _ = _get_labeled_data(db, q, limit=None, pssm=False)
blast2go = parallel_blast(targets, reference, num_cpu=args.num_cpu)
np.save(pred_path, blast2go)
else:
USE_PRIOR = False
blast2go = None
input_lang = Lang("AA")
output_lang = Lang("GO")
trn_records = prepare_data(DataGenerator(trn_seq2pssm, trn_seq2go, blast2go))
tst_records = prepare_data(DataGenerator(tst_seq2pssm, tst_seq2go, blast2go))
input_lang.trim(MIN_COUNT)
output_lang.trim(MIN_COUNT)
save_object(input_lang, os.path.join(ckptpath, "aa-lang-%s.pkl" % GoAspect(args.aspect)))
save_object(output_lang, os.path.join(ckptpath, "go-lang-%s.pkl" % GoAspect(args.aspect)))
trn_records, _ = trim_records(trn_records)
tst_records, _ = trim_records(tst_records)
test_models()
input_embedding = None
output_embedding = None
main_loop(
print_every=args.print_every,
evaluate_every=args.eval_every
)
|
yotamfr/prot2vec
|
src/python/pssm2go_train.py
|
Python
|
mit
| 27,410
|
[
"BLAST"
] |
8c52bac0333276678104d35303faf6bd0724d81880ee8cda185e8dc54a20dee1
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Package implementing cryptography related functionality.
"""
from __future__ import unicode_literals
import random
import base64
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtWidgets import QLineEdit, QInputDialog
from E5Gui import E5MessageBox
import Preferences
###############################################################################
## password handling functions below
###############################################################################
EncodeMarker = "CE4"
CryptoMarker = "CR5"
Delimiter = "$"
MasterPassword = None
def pwEncode(pw):
"""
Module function to encode a password.
@param pw password to encode (string)
@return encoded password (string)
"""
pop = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" \
".,;:-_!$?*+#"
rpw = "".join(random.sample(pop, 32)) + pw + \
"".join(random.sample(pop, 32))
return EncodeMarker + base64.b64encode(rpw.encode("utf-8")).decode("ascii")
def pwDecode(epw):
"""
Module function to decode a password.
@param epw encoded password to decode (string)
@return decoded password (string)
"""
if not epw.startswith(EncodeMarker):
return epw # it was not encoded using pwEncode
return base64.b64decode(epw[3:].encode("ascii"))[32:-32].decode("utf-8")
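# Added note (not part of the original module): pwEncode()/pwDecode() only
# obfuscate a password -- 32 random characters are prepended and appended and
# the result is base64-encoded -- so the plain text is recoverable without a key:
#     pwDecode(pwEncode("secret"))   # -> "secret"; the "CE4" marker identifies it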
def __getMasterPassword():
"""
Private module function to get the password from the user.
"""
global MasterPassword
pw, ok = QInputDialog.getText(
None,
QCoreApplication.translate("Crypto", "Master Password"),
QCoreApplication.translate("Crypto", "Enter the master password:"),
QLineEdit.Password)
if ok:
from .py3PBKDF2 import verifyPassword
masterPassword = Preferences.getUser("MasterPassword")
try:
if masterPassword:
if verifyPassword(pw, masterPassword):
MasterPassword = pwEncode(pw)
else:
E5MessageBox.warning(
None,
QCoreApplication.translate(
"Crypto", "Master Password"),
QCoreApplication.translate(
"Crypto",
"""The given password is incorrect."""))
else:
E5MessageBox.critical(
None,
QCoreApplication.translate("Crypto", "Master Password"),
QCoreApplication.translate(
"Crypto",
"""There is no master password registered."""))
except ValueError as why:
E5MessageBox.warning(
None,
QCoreApplication.translate("Crypto", "Master Password"),
QCoreApplication.translate(
"Crypto",
"""<p>The given password cannot be verified.</p>"""
"""<p>Reason: {0}""".format(str(why))))
def pwEncrypt(pw, masterPW=None):
"""
Module function to encrypt a password.
@param pw password to encrypt (string)
@param masterPW password to be used for encryption (string)
@return encrypted password (string) and flag indicating
success (boolean)
"""
if masterPW is None:
if MasterPassword is None:
__getMasterPassword()
if MasterPassword is None:
return "", False
masterPW = pwDecode(MasterPassword)
from .py3PBKDF2 import hashPasswordTuple
digestname, iterations, salt, hash = hashPasswordTuple(masterPW)
key = hash[:32]
from .py3AES import encryptData
try:
cipher = encryptData(key, pw.encode("utf-8"))
except ValueError:
return "", False
return CryptoMarker + Delimiter.join([
digestname,
str(iterations),
base64.b64encode(salt).decode("ascii"),
base64.b64encode(cipher).decode("ascii")
]), True
def pwDecrypt(epw, masterPW=None):
"""
Module function to decrypt a password.
@param epw hashed password to decrypt (string)
@param masterPW password to be used for decryption (string)
@return decrypted password (string) and flag indicating
success (boolean)
"""
if not epw.startswith(CryptoMarker):
return epw, False # it was not encoded using pwEncrypt
if masterPW is None:
if MasterPassword is None:
__getMasterPassword()
if MasterPassword is None:
return "", False
masterPW = pwDecode(MasterPassword)
from .py3AES import decryptData
from .py3PBKDF2 import rehashPassword
hashParameters, epw = epw[3:].rsplit(Delimiter, 1)
try:
# recreate the key used to encrypt
key = rehashPassword(masterPW, hashParameters)[:32]
plaintext = decryptData(key, base64.b64decode(epw.encode("ascii")))
except ValueError:
return "", False
return plaintext.decode("utf-8"), True
def pwReencrypt(epw, oldPassword, newPassword):
"""
Module function to re-encrypt a password.
@param epw hashed password to re-encrypt (string)
@param oldPassword password used to encrypt (string)
@param newPassword new password to be used (string)
@return encrypted password (string) and flag indicating
success (boolean)
"""
plaintext, ok = pwDecrypt(epw, oldPassword)
if ok:
return pwEncrypt(plaintext, newPassword)
else:
return "", False
def pwRecode(epw, oldPassword, newPassword):
"""
Module function to re-encode a password.
In case of an error the encoded password is returned unchanged.
@param epw encoded password to re-encode (string)
@param oldPassword password used to encode (string)
@param newPassword new password to be used (string)
@return encoded password (string)
"""
if epw == "":
return epw
if newPassword == "":
plaintext, ok = pwDecrypt(epw)
return (pwEncode(plaintext) if ok else epw)
else:
if oldPassword == "":
plaintext = pwDecode(epw)
cipher, ok = pwEncrypt(plaintext, newPassword)
return (cipher if ok else epw)
else:
npw, ok = pwReencrypt(epw, oldPassword, newPassword)
return (npw if ok else epw)
def pwConvert(pw, encode=True):
"""
Module function to convert a plaintext password to the encoded form or
vice versa.
    If there is an error, an empty string is returned for the encode function
or the given encoded password for the decode function.
@param pw password to encode (string)
@param encode flag indicating an encode or decode function (boolean)
@return encoded or decoded password (string)
"""
if pw == "":
return pw
if encode:
# plain text -> encoded
if Preferences.getUser("UseMasterPassword"):
epw = pwEncrypt(pw)[0]
else:
epw = pwEncode(pw)
return epw
else:
# encoded -> plain text
if Preferences.getUser("UseMasterPassword"):
plain, ok = pwDecrypt(pw)
else:
plain, ok = pwDecode(pw), True
return (plain if ok else pw)
def changeRememberedMaster(newPassword):
"""
Module function to change the remembered master password.
@param newPassword new password to be used (string)
"""
global MasterPassword
if newPassword == "":
MasterPassword = None
else:
MasterPassword = pwEncode(newPassword)
def dataEncrypt(data, password, keyLength=32, hashIterations=10000):
"""
    Module function to encrypt data.
@param data data to encrypt (bytes)
@param password password to be used for encryption (string)
@keyparam keyLength length of the key to be generated for encryption
(16, 24 or 32)
@keyparam hashIterations number of hashes to be applied to the password for
generating the encryption key (integer)
@return encrypted data (bytes) and flag indicating
success (boolean)
"""
from .py3AES import encryptData
from .py3PBKDF2 import hashPasswordTuple
digestname, iterations, salt, hash = \
hashPasswordTuple(password, iterations=hashIterations)
key = hash[:keyLength]
try:
cipher = encryptData(key, data)
except ValueError:
return b"", False
return CryptoMarker.encode("utf-8") + Delimiter.encode("utf-8").join([
digestname.encode("utf-8"),
str(iterations).encode("utf-8"),
base64.b64encode(salt),
base64.b64encode(cipher)
]), True
def dataDecrypt(edata, password, keyLength=32):
"""
    Module function to decrypt data.
@param edata hashed data to decrypt (string)
@param password password to be used for decryption (string)
@keyparam keyLength length of the key to be generated for decryption
(16, 24 or 32)
@return decrypted data (bytes) and flag indicating
success (boolean)
"""
if not edata.startswith(CryptoMarker.encode("utf-8")):
return edata, False # it was not encoded using dataEncrypt
from .py3AES import decryptData
from .py3PBKDF2 import rehashPassword
hashParametersBytes, edata = edata[3:].rsplit(Delimiter.encode("utf-8"), 1)
hashParameters = hashParametersBytes.decode()
try:
# recreate the key used to encrypt
key = rehashPassword(password, hashParameters)[:keyLength]
plaintext = decryptData(key, base64.b64decode(edata))
except ValueError:
return "", False
return plaintext, True
if __name__ == "__main__":
import sys
from PyQt5.QtWidgets import QApplication
app = QApplication([])
mpw = "blahblah"
cpw = "SomeSecret"
cipher, ok = pwEncrypt(cpw)
print(ok, cipher)
plain, ok = pwDecrypt(cipher)
print(ok, plain)
cipher, ok = pwEncrypt(cpw, mpw)
print(ok, cipher)
plain, ok = pwDecrypt(cipher, mpw)
print(ok, plain)
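    # Added illustration (not part of the original file): dataEncrypt/dataDecrypt
    # round-trip arbitrary bytes using an AES key derived from the password via
    # PBKDF2, in the same style as the pwEncrypt/pwDecrypt demo above.
    blob, ok = dataEncrypt(b"some binary payload", mpw)
    print(ok, blob)
    data, ok = dataDecrypt(blob, mpw)
    print(ok, data)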
sys.exit(0)
|
paulmadore/Eric-IDE
|
6-6.0.9/eric/Utilities/crypto/__init__.py
|
Python
|
gpl-3.0
| 10,304
|
[
"EPW"
] |
bda8cb9c914006b5ca09ccfec9746668060abd6659d449fb690d5f55ad6553bc
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import timedelta
from collections import defaultdict
import functools
import itertools
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from . import common
from . import duck_array_ops
from . import dtypes
from . import indexing
from . import nputils
from . import ops
from . import utils
from .pycompat import (basestring, OrderedDict, zip, integer_types,
dask_array_type)
from .indexing import (PandasIndexAdapter, orthogonally_indexable)
import xarray as xr # only for Dataset and DataArray
try:
import dask.array as da
except ImportError:
pass
def as_variable(obj, name=None):
"""Convert an object into a Variable.
Parameters
----------
obj : object
Object to convert into a Variable.
- If the object is already a Variable, return a shallow copy.
- Otherwise, if the object has 'dims' and 'data' attributes, convert
it into a new Variable.
- If all else fails, attempt to convert the object into a Variable by
unpacking it into the arguments for creating a new Variable.
name : str, optional
If provided:
- `obj` can be a 1D array, which is assumed to label coordinate values
along a dimension of this given name.
- Variables with name matching one of their dimensions are converted
into `IndexVariable` objects.
Returns
-------
var : Variable
The newly created variable.
"""
# TODO: consider extending this method to automatically handle Iris and
# pandas objects.
if hasattr(obj, 'variable'):
# extract the primary Variable from DataArrays
obj = obj.variable
if isinstance(obj, Variable):
obj = obj.copy(deep=False)
elif hasattr(obj, 'dims') and (hasattr(obj, 'data') or
hasattr(obj, 'values')):
obj_data = getattr(obj, 'data', None)
if obj_data is None:
obj_data = getattr(obj, 'values')
obj = Variable(obj.dims, obj_data,
getattr(obj, 'attrs', None),
getattr(obj, 'encoding', None))
elif isinstance(obj, tuple):
try:
obj = Variable(*obj)
except TypeError:
# use .format() instead of % because it handles tuples consistently
raise TypeError('tuples to convert into variables must be of the '
'form (dims, data[, attrs, encoding]): '
'{}'.format(obj))
elif utils.is_scalar(obj):
obj = Variable([], obj)
elif getattr(obj, 'name', None) is not None:
obj = Variable(obj.name, obj)
elif name is not None:
data = as_compatible_data(obj)
if data.ndim != 1:
raise ValueError(
'cannot set variable %r with %r-dimensional data '
'without explicit dimension names. Pass a tuple of '
'(dims, data) instead.' % (name, data.ndim))
obj = Variable(name, obj, fastpath=True)
else:
raise TypeError('unable to convert object into a variable without an '
'explicit list of dimensions: %r' % obj)
if name is not None and name in obj.dims:
# convert the Variable into an Index
if obj.ndim != 1:
raise ValueError(
'%r has more than 1-dimension and the same name as one of its '
'dimensions %r. xarray disallows such variables because they '
'conflict with the coordinates used to label dimensions.'
% (name, obj.dims))
obj = obj.to_index_variable()
return obj
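# Illustrative usage (added comments, not part of the library source):
#     as_variable(Variable('x', [1, 2, 3]))     # shallow copy of a Variable
#     as_variable(('x', [1, 2, 3]))             # (dims, data) tuple
#     as_variable([10, 20, 30], name='x')       # 1D data labelled by name -> IndexVariable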
def _maybe_wrap_data(data):
"""
Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure
they can be indexed properly.
NumpyArrayAdapter, PandasIndexAdapter and LazilyIndexedArray should
all pass through unmodified.
"""
if isinstance(data, pd.Index):
return PandasIndexAdapter(data)
return data
def _possibly_convert_objects(values):
"""Convert arrays of datetime.datetime and datetime.timedelta objects into
datetime64 and timedelta64, according to the pandas convention.
"""
return np.asarray(pd.Series(values.ravel())).reshape(values.shape)
def as_compatible_data(data, fastpath=False):
"""Prepare and wrap data to put in a Variable.
- If data does not have the necessary attributes, convert it to ndarray.
- If data has dtype=datetime64, ensure that it has ns precision. If it's a
pandas.Timestamp, convert it to datetime64.
- If data is already a pandas or xarray object (other than an Index), just
use the values.
Finally, wrap it up with an adapter if necessary.
"""
if fastpath and getattr(data, 'ndim', 0) > 0:
# can't use fastpath (yet) for scalars
return _maybe_wrap_data(data)
if isinstance(data, Variable):
return data.data
# add a custom fast-path for dask.array to avoid expensive checks for the
# dtype attribute
if isinstance(data, dask_array_type):
return data
if isinstance(data, pd.Index):
return _maybe_wrap_data(data)
if isinstance(data, tuple):
data = utils.to_0d_object_array(data)
if isinstance(data, pd.Timestamp):
# TODO: convert, handle datetime objects, too
data = np.datetime64(data.value, 'ns')
if isinstance(data, timedelta):
data = np.timedelta64(getattr(data, 'value', data), 'ns')
if (not hasattr(data, 'dtype') or not isinstance(data.dtype, np.dtype) or
not hasattr(data, 'shape') or
isinstance(data, (np.string_, np.unicode_,
np.datetime64, np.timedelta64))):
# data must be ndarray-like
# don't allow non-numpy dtypes (e.g., categories)
data = np.asarray(data)
# we don't want nested self-described arrays
data = getattr(data, 'values', data)
if isinstance(data, np.ma.MaskedArray):
mask = np.ma.getmaskarray(data)
if mask.any():
dtype, fill_value = dtypes.maybe_promote(data.dtype)
data = np.asarray(data, dtype=dtype)
data[mask] = fill_value
else:
data = np.asarray(data)
if isinstance(data, np.ndarray):
if data.dtype.kind == 'O':
data = _possibly_convert_objects(data)
elif data.dtype.kind == 'M':
data = np.asarray(data, 'datetime64[ns]')
elif data.dtype.kind == 'm':
data = np.asarray(data, 'timedelta64[ns]')
return _maybe_wrap_data(data)
def _as_array_or_item(data):
"""Return the given values as a numpy array, or as an individual item if
it's a 0d datetime64 or timedelta64 array.
Importantly, this function does not copy data if it is already an ndarray -
otherwise, it will not be possible to update Variable values in place.
This function mostly exists because 0-dimensional ndarrays with
dtype=datetime64 are broken :(
https://github.com/numpy/numpy/issues/4337
https://github.com/numpy/numpy/issues/7619
TODO: remove this (replace with np.asarray) once these issues are fixed
"""
data = np.asarray(data)
if data.ndim == 0:
if data.dtype.kind == 'M':
data = np.datetime64(data, 'ns')
elif data.dtype.kind == 'm':
data = np.timedelta64(data, 'ns')
return data
class Variable(common.AbstractArray, utils.NdimSizeLenMixin):
"""A netcdf-like variable consisting of dimensions, data and attributes
which describe a single Array. A single Variable object is not fully
described outside the context of its parent Dataset (if you want such a
fully described object, use a DataArray instead).
The main functional difference between Variables and numpy arrays is that
numerical operations on Variables implement array broadcasting by dimension
    name. For example, adding a Variable with dimensions `('time',)` to
another Variable with dimensions `('space',)` results in a new Variable
with dimensions `('time', 'space')`. Furthermore, numpy reduce operations
like ``mean`` or ``sum`` are overwritten to take a "dimension" argument
instead of an "axis".
Variables are light-weight objects used as the building block for datasets.
They are more primitive objects, so operations with them provide marginally
higher performance than using DataArrays. However, manipulating data in the
form of a Dataset or DataArray should almost always be preferred, because
they can use more complete metadata in context of coordinate labels.
"""
def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
"""
Parameters
----------
dims : str or sequence of str
            Name(s) of the data dimension(s). Must be either a string (only
for 1D data) or a sequence of strings with length equal to the
number of dimensions.
data : array_like
Data array which supports numpy-like data access.
attrs : dict_like or None, optional
Attributes to assign to the new variable. If None (default), an
empty attribute dictionary is initialized.
encoding : dict_like or None, optional
Dictionary specifying how to encode this array's data into a
serialized format like netCDF4. Currently used keys (for netCDF)
include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'.
Well-behaved code to serialize a Variable should ignore
unrecognized encoding items.
"""
self._data = as_compatible_data(data, fastpath=fastpath)
self._dims = self._parse_dimensions(dims)
self._attrs = None
self._encoding = None
if attrs is not None:
self.attrs = attrs
if encoding is not None:
self.encoding = encoding
@property
def dtype(self):
return self._data.dtype
@property
def shape(self):
return self._data.shape
@property
def nbytes(self):
return self.size * self.dtype.itemsize
@property
def _in_memory(self):
return (isinstance(self._data, (np.ndarray, PandasIndexAdapter)) or
(isinstance(self._data, indexing.MemoryCachedArray) and
isinstance(self._data.array, np.ndarray)))
@property
def data(self):
if isinstance(self._data, dask_array_type):
return self._data
else:
return self.values
@data.setter
def data(self, data):
data = as_compatible_data(data)
if data.shape != self.shape:
raise ValueError(
"replacement data must match the Variable's shape")
self._data = data
@property
def _indexable_data(self):
return orthogonally_indexable(self._data)
def load(self):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return this variable.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically.
"""
if not isinstance(self._data, np.ndarray):
self._data = np.asarray(self._data)
return self
def compute(self):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return a new variable. The original is
left unaltered.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically.
"""
new = self.copy(deep=False)
return new.load()
@property
def values(self):
"""The variable's data as a numpy.ndarray"""
return _as_array_or_item(self._data)
@values.setter
def values(self, values):
self.data = values
def to_base_variable(self):
"""Return this variable as a base xarray.Variable"""
return Variable(self.dims, self._data, self._attrs,
encoding=self._encoding, fastpath=True)
to_variable = utils.alias(to_base_variable, 'to_variable')
def to_index_variable(self):
"""Return this variable as an xarray.IndexVariable"""
return IndexVariable(self.dims, self._data, self._attrs,
encoding=self._encoding, fastpath=True)
to_coord = utils.alias(to_index_variable, 'to_coord')
def to_index(self):
"""Convert this variable to a pandas.Index"""
return self.to_index_variable().to_index()
@property
def dims(self):
"""Tuple of dimension names with which this variable is associated.
"""
return self._dims
def _parse_dimensions(self, dims):
if isinstance(dims, basestring):
dims = (dims,)
dims = tuple(dims)
if len(dims) != self.ndim:
raise ValueError('dimensions %s must have the same length as the '
'number of data dimensions, ndim=%s'
% (dims, self.ndim))
return dims
@dims.setter
def dims(self, value):
self._dims = self._parse_dimensions(value)
def _item_key_to_tuple(self, key):
if utils.is_dict_like(key):
return tuple(key.get(dim, slice(None)) for dim in self.dims)
else:
return key
def __getitem__(self, key):
"""Return a new Array object whose contents are consistent with
getting the provided key from the underlying data.
NB. __getitem__ and __setitem__ implement "orthogonal indexing" like
netCDF4-python, where the key can only include integers, slices
(including `Ellipsis`) and 1d arrays, each of which are applied
orthogonally along their respective dimensions.
The difference does not matter in most cases unless you are using
numpy's "fancy indexing," which can otherwise result in data arrays
        whose shapes are inconsistent with (or simply uninterpretable in terms
        of) the variable's dimensions.
If you really want to do indexing like `x[x > 0]`, manipulate the numpy
array `x.values` directly.
"""
key = self._item_key_to_tuple(key)
key = indexing.expanded_indexer(key, self.ndim)
dims = tuple(dim for k, dim in zip(key, self.dims)
if not isinstance(k, integer_types))
values = self._indexable_data[key]
# orthogonal indexing should ensure the dimensionality is consistent
if hasattr(values, 'ndim'):
assert values.ndim == len(dims), (values.ndim, len(dims))
else:
assert len(dims) == 0, len(dims)
return type(self)(dims, values, self._attrs, self._encoding,
fastpath=True)
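    # Example of the orthogonal indexing described above (added comment):
    #     v = Variable(('x', 'y'), np.arange(12).reshape(3, 4))
    #     v[[0, 2], [1, 3]].shape   # -> (2, 2): each indexer applies to its own
    #                               #    dimension, unlike numpy fancy indexing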
def __setitem__(self, key, value):
"""__setitem__ is overloaded to access the underlying numpy values with
orthogonal indexing.
See __getitem__ for more details.
"""
key = self._item_key_to_tuple(key)
if isinstance(self._data, dask_array_type):
raise TypeError("this variable's data is stored in a dask array, "
'which does not support item assignment. To '
'assign to this variable, you must first load it '
'into memory explicitly using the .load() '
'method or accessing its .values attribute.')
data = orthogonally_indexable(self._data)
data[key] = value
@property
def attrs(self):
"""Dictionary of local attributes on this variable.
"""
if self._attrs is None:
self._attrs = OrderedDict()
return self._attrs
@attrs.setter
def attrs(self, value):
self._attrs = OrderedDict(value)
@property
def encoding(self):
"""Dictionary of encodings on this variable.
"""
if self._encoding is None:
self._encoding = {}
return self._encoding
@encoding.setter
def encoding(self, value):
try:
self._encoding = dict(value)
except ValueError:
raise ValueError('encoding must be castable to a dictionary')
def copy(self, deep=True):
"""Returns a copy of this object.
If `deep=True`, the data array is loaded into memory and copied onto
the new object. Dimensions, attributes and encodings are always copied.
"""
data = self._data
if isinstance(data, indexing.MemoryCachedArray):
# don't share caching between copies
data = indexing.MemoryCachedArray(data.array)
if deep:
if isinstance(data, dask_array_type):
data = data.copy()
elif not isinstance(data, PandasIndexAdapter):
# pandas.Index is immutable
data = np.array(data)
# note:
# dims is already an immutable tuple
# attributes and encoding will be copied when the new Array is created
return type(self)(self.dims, data, self._attrs, self._encoding,
fastpath=True)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo=None):
# memo does nothing but is required for compatibility with
# copy.deepcopy
return self.copy(deep=True)
# mutable objects should not be hashable
__hash__ = None
@property
def chunks(self):
"""Block dimensions for this array's data or None if it's not a dask
array.
"""
return getattr(self._data, 'chunks', None)
_array_counter = itertools.count()
def chunk(self, chunks=None, name=None, lock=False):
"""Coerce this array's data into a dask arrays with the given chunks.
If this variable is a non-dask array, it will be converted to dask
array. If it's a dask array, it will be rechunked to the given chunk
sizes.
        If chunks are not provided for one or more dimensions, chunk
sizes along that dimension will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Parameters
----------
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``.
name : str, optional
Used to generate the name for this array in the internal dask
            graph. Does not need to be unique.
lock : optional
Passed on to :py:func:`dask.array.from_array`, if the array is not
already as dask array.
Returns
-------
chunked : xarray.Variable
"""
import dask.array as da
if utils.is_dict_like(chunks):
chunks = dict((self.get_axis_num(dim), chunk)
for dim, chunk in chunks.items())
if chunks is None:
chunks = self.chunks or self.shape
data = self._data
if isinstance(data, da.Array):
data = data.rechunk(chunks)
else:
if utils.is_dict_like(chunks):
chunks = tuple(chunks.get(n, s)
for n, s in enumerate(self.shape))
data = da.from_array(data, chunks, name=name, lock=lock)
return type(self)(self.dims, data, self._attrs, self._encoding,
fastpath=True)
def isel(self, **indexers):
"""Return a new array indexed along the specified dimension(s).
Parameters
----------
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by integers, slice objects or arrays.
Returns
-------
obj : Array object
A new Array with the selected data and dimensions. In general,
the new variable's data will be a view of this variable's data,
unless numpy fancy indexing was triggered by using an array
indexer, in which case the data will be a copy.
"""
invalid = [k for k in indexers if k not in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
key = [slice(None)] * self.ndim
for i, dim in enumerate(self.dims):
if dim in indexers:
key[i] = indexers[dim]
return self[tuple(key)]
def squeeze(self, dim=None):
"""Return a new object with squeezed data.
Parameters
----------
dim : None or str or tuple of str, optional
Selects a subset of the length one dimensions. If a dimension is
selected with length greater than one, an error is raised. If
None, all length one dimensions are squeezed.
Returns
-------
squeezed : same type as caller
            This object, but with all or a subset of the dimensions of
length 1 removed.
See Also
--------
numpy.squeeze
"""
dims = common.get_squeeze_dims(self, dim)
return self.isel(**{d: 0 for d in dims})
def _shift_one_dim(self, dim, count):
axis = self.get_axis_num(dim)
if count > 0:
keep = slice(None, -count)
elif count < 0:
keep = slice(-count, None)
else:
keep = slice(None)
trimmed_data = self[(slice(None),) * axis + (keep,)].data
dtype, fill_value = dtypes.maybe_promote(self.dtype)
shape = list(self.shape)
shape[axis] = min(abs(count), shape[axis])
if isinstance(trimmed_data, dask_array_type):
chunks = list(trimmed_data.chunks)
chunks[axis] = (shape[axis],)
full = functools.partial(da.full, chunks=chunks)
else:
full = np.full
nans = full(shape, fill_value, dtype=dtype)
if count > 0:
arrays = [nans, trimmed_data]
else:
arrays = [trimmed_data, nans]
data = duck_array_ops.concatenate(arrays, axis)
if isinstance(data, dask_array_type):
# chunked data should come out with the same chunks; this makes
# it feasible to combine shifted and unshifted data
# TODO: remove this once dask.array automatically aligns chunks
data = data.rechunk(self.data.chunks)
return type(self)(self.dims, data, self._attrs, fastpath=True)
def shift(self, **shifts):
"""
Return a new Variable with shifted data.
Parameters
----------
**shifts : keyword arguments of the form {dim: offset}
Integer offset to shift along each of the given dimensions.
Positive offsets shift to the right; negative offsets shift to the
left.
Returns
-------
shifted : Variable
Variable with the same dimensions and attributes but shifted data.
"""
result = self
for dim, count in shifts.items():
result = result._shift_one_dim(dim, count)
return result
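    # Example (added comment): shift() pads the vacated positions with NaN --
    #     Variable(('x',), [1., 2., 3.]).shift(x=1).values   # -> array([nan, 1., 2.])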
def _roll_one_dim(self, dim, count):
axis = self.get_axis_num(dim)
count %= self.shape[axis]
if count != 0:
indices = [slice(-count, None), slice(None, -count)]
else:
indices = [slice(None)]
arrays = [self[(slice(None),) * axis + (idx,)].data
for idx in indices]
data = duck_array_ops.concatenate(arrays, axis)
if isinstance(data, dask_array_type):
# chunked data should come out with the same chunks; this makes
# it feasible to combine shifted and unshifted data
# TODO: remove this once dask.array automatically aligns chunks
data = data.rechunk(self.data.chunks)
return type(self)(self.dims, data, self._attrs, fastpath=True)
def roll(self, **shifts):
"""
        Return a new Variable with rolled data.
Parameters
----------
**shifts : keyword arguments of the form {dim: offset}
Integer offset to roll along each of the given dimensions.
Positive offsets roll to the right; negative offsets roll to the
left.
Returns
-------
shifted : Variable
Variable with the same dimensions and attributes but rolled data.
"""
result = self
for dim, count in shifts.items():
result = result._roll_one_dim(dim, count)
return result
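    # Example (added comment): roll() wraps values around instead of padding --
    #     Variable(('x',), [1, 2, 3]).roll(x=1).values   # -> array([3, 1, 2])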
def transpose(self, *dims):
"""Return a new Variable object with transposed dimensions.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions. Otherwise, reorder the
dimensions to this order.
Returns
-------
transposed : Variable
The returned object has transposed data and dimensions with the
same attributes as the original.
Notes
-----
Although this operation returns a view of this variable's data, it is
not lazy -- the data will be fully loaded.
See Also
--------
numpy.transpose
"""
if len(dims) == 0:
dims = self.dims[::-1]
axes = self.get_axis_num(dims)
if len(dims) < 2: # no need to transpose if only one dimension
return self.copy(deep=False)
data = duck_array_ops.transpose(self.data, axes)
return type(self)(dims, data, self._attrs, self._encoding,
fastpath=True)
def expand_dims(self, *args):
import warnings
warnings.warn('Variable.expand_dims is deprecated: use '
'Variable.set_dims instead', DeprecationWarning,
stacklevel=2)
        return self.set_dims(*args)
def set_dims(self, dims, shape=None):
"""Return a new variable with given set of dimensions.
This method might be used to attach new dimension(s) to variable.
When possible, this operation does not copy this variable's data.
Parameters
----------
dims : str or sequence of str or dict
Dimensions to include on the new variable. If a dict, values are
used to provide the sizes of new dimensions; otherwise, new
dimensions are inserted with length 1.
Returns
-------
Variable
"""
if isinstance(dims, basestring):
dims = [dims]
if shape is None and utils.is_dict_like(dims):
shape = dims.values()
missing_dims = set(self.dims) - set(dims)
if missing_dims:
raise ValueError('new dimensions must be a superset of existing '
'dimensions')
self_dims = set(self.dims)
expanded_dims = tuple(
d for d in dims if d not in self_dims) + self.dims
if self.dims == expanded_dims:
# don't use broadcast_to unless necessary so the result remains
# writeable if possible
expanded_data = self.data
elif shape is not None:
dims_map = dict(zip(dims, shape))
tmp_shape = tuple(dims_map[d] for d in expanded_dims)
expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape)
else:
expanded_data = self.data[
(None,) * (len(expanded_dims) - self.ndim)]
expanded_var = Variable(expanded_dims, expanded_data, self._attrs,
self._encoding, fastpath=True)
return expanded_var.transpose(*dims)
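    # Example (added comment): inserting a new leading dimension of length 1 --
    #     Variable(('x',), np.arange(3)).set_dims(('t', 'x')).shape   # -> (1, 3)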
def _stack_once(self, dims, new_dim):
if not set(dims) <= set(self.dims):
raise ValueError('invalid existing dimensions: %s' % dims)
if new_dim in self.dims:
raise ValueError('cannot create a new dimension with the same '
'name as an existing dimension')
if len(dims) == 0:
# don't stack
return self.copy(deep=False)
other_dims = [d for d in self.dims if d not in dims]
dim_order = other_dims + list(dims)
reordered = self.transpose(*dim_order)
new_shape = reordered.shape[:len(other_dims)] + (-1,)
new_data = reordered.data.reshape(new_shape)
new_dims = reordered.dims[:len(other_dims)] + (new_dim,)
return Variable(new_dims, new_data, self._attrs, self._encoding,
fastpath=True)
def stack(self, **dimensions):
"""
Stack any number of existing dimensions into a single new dimension.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
**dimensions : keyword arguments of the form new_name=(dim1, dim2, ...)
Names of new dimensions, and the existing dimensions that they
replace.
Returns
-------
stacked : Variable
Variable with the same attributes but stacked data.
See also
--------
Variable.unstack
"""
result = self
for new_dim, dims in dimensions.items():
result = result._stack_once(dims, new_dim)
return result
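    # Example (added comment): collapsing two dimensions into one --
    #     v = Variable(('x', 'y'), np.arange(6).reshape(2, 3))
    #     v.stack(z=('x', 'y')).dims   # -> ('z',), 6 values in C (row-major) order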
def _unstack_once(self, dims, old_dim):
new_dim_names = tuple(dims.keys())
new_dim_sizes = tuple(dims.values())
if old_dim not in self.dims:
raise ValueError('invalid existing dimension: %s' % old_dim)
if set(new_dim_names).intersection(self.dims):
raise ValueError('cannot create a new dimension with the same '
'name as an existing dimension')
if np.prod(new_dim_sizes) != self.sizes[old_dim]:
raise ValueError('the product of the new dimension sizes must '
'equal the size of the old dimension')
other_dims = [d for d in self.dims if d != old_dim]
dim_order = other_dims + [old_dim]
reordered = self.transpose(*dim_order)
new_shape = reordered.shape[:len(other_dims)] + new_dim_sizes
new_data = reordered.data.reshape(new_shape)
new_dims = reordered.dims[:len(other_dims)] + new_dim_names
return Variable(new_dims, new_data, self._attrs, self._encoding,
fastpath=True)
def unstack(self, **dimensions):
"""
Unstack an existing dimension into multiple new dimensions.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
**dimensions : keyword arguments of the form old_dim={dim1: size1, ...}
Names of existing dimensions, and the new dimensions and sizes that they
map to.
Returns
-------
unstacked : Variable
Variable with the same attributes but unstacked data.
See also
--------
Variable.stack
"""
result = self
for old_dim, dims in dimensions.items():
result = result._unstack_once(dims, old_dim)
return result
def fillna(self, value):
return ops.fillna(self, value)
def where(self, cond, other=dtypes.NA):
return ops.where_method(self, cond, other)
def reduce(self, func, dim=None, axis=None, keep_attrs=False,
allow_lazy=False, **kwargs):
"""Reduce this array by applying `func` along some dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
the reduction is calculated over the flattened array (by calling
`func(x)` without an axis argument).
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
if dim is not None and axis is not None:
raise ValueError("cannot supply both 'axis' and 'dim' arguments")
if getattr(func, 'keep_dims', False):
if dim is None and axis is None:
raise ValueError("must supply either single 'dim' or 'axis' "
"argument to %s" % (func.__name__))
if dim is not None:
axis = self.get_axis_num(dim)
data = func(self.data if allow_lazy else self.values,
axis=axis, **kwargs)
if getattr(data, 'shape', ()) == self.shape:
dims = self.dims
else:
removed_axes = (range(self.ndim) if axis is None
else np.atleast_1d(axis) % self.ndim)
dims = [adim for n, adim in enumerate(self.dims)
if n not in removed_axes]
attrs = self._attrs if keep_attrs else None
return Variable(dims, data, attrs=attrs)
@classmethod
def concat(cls, variables, dim='concat_dim', positions=None,
shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Array
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
shortcut : bool, optional
This option is used internally to speed-up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
"""
if not isinstance(dim, basestring):
dim, = dim.dims
# can't do this lazily: we need to loop through variables at least
# twice
variables = list(variables)
first_var = variables[0]
arrays = [v.data for v in variables]
# TODO: use our own type promotion rules to ensure that
# [str, float] -> object, not str like numpy
if dim in first_var.dims:
axis = first_var.get_axis_num(dim)
dims = first_var.dims
data = duck_array_ops.concatenate(arrays, axis=axis)
if positions is not None:
# TODO: deprecate this option -- we don't need it for groupby
# any more.
indices = nputils.inverse_permutation(
np.concatenate(positions))
data = duck_array_ops.take(data, indices, axis=axis)
else:
axis = 0
dims = (dim,) + first_var.dims
data = duck_array_ops.stack(arrays, axis=axis)
attrs = OrderedDict(first_var.attrs)
if not shortcut:
for var in variables:
if var.dims != first_var.dims:
raise ValueError('inconsistent dimensions')
utils.remove_incompatible_items(attrs, var.attrs)
return cls(dims, data, attrs)
def equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the same dimensions and values;
otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for Variables
does element-wise comparisons (like numpy.ndarrays).
"""
other = getattr(other, 'variable', other)
try:
return (self.dims == other.dims and
(self._data is other._data or
equiv(self.data, other.data)))
except (TypeError, AttributeError):
return False
def broadcast_equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the values after being broadcast against
each other; otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
"""
try:
self, other = broadcast_variables(self, other)
except (ValueError, AttributeError):
return False
return self.equals(other, equiv=equiv)
def identical(self, other):
"""Like equals, but also checks attributes.
"""
try:
return (utils.dict_equiv(self.attrs, other.attrs) and
self.equals(other))
except (TypeError, AttributeError):
return False
def no_conflicts(self, other):
"""True if the intersection of two Variable's non-null data is
equal; otherwise false.
Variables can thus still be equal if there are locations where either,
or both, contain NaN values.
"""
return self.broadcast_equals(
other, equiv=duck_array_ops.array_notnull_equiv)
def quantile(self, q, dim=None, interpolation='linear'):
"""Compute the qth quantile of the data along the specified dimension.
Returns the qth quantiles(s) of the array elements.
Parameters
----------
q : float in range of [0,1] (or sequence of floats)
Quantile to compute, which must be between 0 and 1
inclusive.
dim : str or sequence of str, optional
Dimension(s) over which to apply quantile.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
Returns
-------
quantiles : Variable
If `q` is a single quantile, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the quantile and a quantile dimension
is added to the return array. The other dimensions are the
dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile,
DataArray.quantile
"""
if isinstance(self.data, dask_array_type):
raise TypeError("quantile does not work for arrays stored as dask "
"arrays. Load the data via .compute() or .load() "
"prior to calling this method.")
if LooseVersion(np.__version__) < LooseVersion('1.10.0'):
raise NotImplementedError(
                'quantile requires numpy version 1.10.0 or later')
q = np.asarray(q, dtype=np.float64)
new_dims = list(self.dims)
if dim is not None:
axis = self.get_axis_num(dim)
if utils.is_scalar(dim):
new_dims.remove(dim)
else:
for d in dim:
new_dims.remove(d)
else:
axis = None
new_dims = []
# only add the quantile dimension if q is array like
if q.ndim != 0:
new_dims = ['quantile'] + new_dims
qs = np.nanpercentile(self.data, q * 100., axis=axis,
interpolation=interpolation)
return Variable(new_dims, qs)
@property
def real(self):
return type(self)(self.dims, self.data.real, self._attrs)
@property
def imag(self):
return type(self)(self.dims, self.data.imag, self._attrs)
def __array_wrap__(self, obj, context=None):
return Variable(self.dims, obj)
@staticmethod
def _unary_op(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
return self.__array_wrap__(f(self.data, *args, **kwargs))
return func
@staticmethod
def _binary_op(f, reflexive=False, **ignored_kwargs):
@functools.wraps(f)
def func(self, other):
if isinstance(other, (xr.DataArray, xr.Dataset)):
return NotImplemented
self_data, other_data, dims = _broadcast_compat_data(self, other)
new_data = (f(self_data, other_data)
if not reflexive
else f(other_data, self_data))
result = Variable(dims, new_data)
return result
return func
@staticmethod
def _inplace_binary_op(f):
@functools.wraps(f)
def func(self, other):
if isinstance(other, xr.Dataset):
raise TypeError('cannot add a Dataset to a Variable in-place')
self_data, other_data, dims = _broadcast_compat_data(self, other)
if dims != self.dims:
raise ValueError('dimensions cannot change for in-place '
'operations')
self.values = f(self_data, other_data)
return self
return func
ops.inject_all_ops_and_reduce_methods(Variable)
class IndexVariable(Variable):
"""Wrapper for accommodating a pandas.Index in an xarray.Variable.
    IndexVariable preserves loaded values in the form of a pandas.Index instead
of a NumPy array. Hence, their values are immutable and must always be one-
dimensional.
They also have a name property, which is the name of their sole dimension
unless another name is given.
"""
def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
super(IndexVariable, self).__init__(dims, data, attrs, encoding,
fastpath)
if self.ndim != 1:
raise ValueError('%s objects must be 1-dimensional' %
type(self).__name__)
# Unlike in Variable, always eagerly load values into memory
if not isinstance(self._data, PandasIndexAdapter):
self._data = PandasIndexAdapter(self._data)
def load(self):
# data is already loaded into memory for IndexVariable
return self
@Variable.data.setter
def data(self, data):
Variable.data.fset(self, data)
if not isinstance(self._data, PandasIndexAdapter):
self._data = PandasIndexAdapter(self._data)
def chunk(self, chunks=None, name=None, lock=False):
# Dummy - do not chunk. This method is invoked e.g. by Dataset.chunk()
return self.copy(deep=False)
def __getitem__(self, key):
key = self._item_key_to_tuple(key)
values = self._indexable_data[key]
if not hasattr(values, 'ndim') or values.ndim == 0:
return Variable((), values, self._attrs, self._encoding)
else:
return type(self)(self.dims, values, self._attrs,
self._encoding, fastpath=True)
def __setitem__(self, key, value):
raise TypeError('%s values cannot be modified' % type(self).__name__)
@classmethod
def concat(cls, variables, dim='concat_dim', positions=None,
shortcut=False):
"""Specialized version of Variable.concat for IndexVariable objects.
This exists because we want to avoid converting Index objects to NumPy
arrays, if possible.
"""
if not isinstance(dim, basestring):
dim, = dim.dims
variables = list(variables)
first_var = variables[0]
if any(not isinstance(v, cls) for v in variables):
raise TypeError('IndexVariable.concat requires that all input '
'variables be IndexVariable objects')
indexes = [v._data.array for v in variables]
if not indexes:
data = []
else:
data = indexes[0].append(indexes[1:])
if positions is not None:
indices = nputils.inverse_permutation(
np.concatenate(positions))
data = data.take(indices)
attrs = OrderedDict(first_var.attrs)
if not shortcut:
for var in variables:
if var.dims != first_var.dims:
raise ValueError('inconsistent dimensions')
utils.remove_incompatible_items(attrs, var.attrs)
return cls(first_var.dims, data, attrs)
def copy(self, deep=True):
"""Returns a copy of this object.
`deep` is ignored since data is stored in the form of pandas.Index,
which is already immutable. Dimensions, attributes and encodings are
always copied.
"""
return type(self)(self.dims, self._data, self._attrs,
self._encoding, fastpath=True)
def equals(self, other, equiv=None):
        # if equiv is specified, delegate to the superclass implementation
if equiv is not None:
return super(IndexVariable, self).equals(other, equiv)
# otherwise use the native index equals, rather than looking at _data
other = getattr(other, 'variable', other)
try:
return (self.dims == other.dims and
self._data_equals(other))
except (TypeError, AttributeError):
return False
def _data_equals(self, other):
return self.to_index().equals(other.to_index())
def to_index_variable(self):
"""Return this variable as an xarray.IndexVariable"""
return self
to_coord = utils.alias(to_index_variable, 'to_coord')
def to_index(self):
"""Convert this variable to a pandas.Index"""
# n.b. creating a new pandas.Index from an old pandas.Index is
# basically free as pandas.Index objects are immutable
assert self.ndim == 1
index = self._data.array
if isinstance(index, pd.MultiIndex):
# set default names for multi-index unnamed levels so that
# we can safely rename dimension / coordinate later
valid_level_names = [name or '{}_level_{}'.format(self.dims[0], i)
for i, name in enumerate(index.names)]
index = index.set_names(valid_level_names)
else:
index = index.set_names(self.name)
return index
@property
def level_names(self):
"""Return MultiIndex level names or None if this IndexVariable has no
MultiIndex.
"""
index = self.to_index()
if isinstance(index, pd.MultiIndex):
return index.names
else:
return None
def get_level_variable(self, level):
"""Return a new IndexVariable from a given MultiIndex level."""
if self.level_names is None:
raise ValueError("IndexVariable %r has no MultiIndex" % self.name)
index = self.to_index()
return type(self)(self.dims, index.get_level_values(level))
@property
def name(self):
return self.dims[0]
@name.setter
def name(self, value):
raise AttributeError('cannot modify name of IndexVariable in-place')
# for backwards compatibility
Coordinate = utils.alias(IndexVariable, 'Coordinate')
def _unified_dims(variables):
# validate dimensions
all_dims = OrderedDict()
for var in variables:
var_dims = var.dims
if len(set(var_dims)) < len(var_dims):
raise ValueError('broadcasting cannot handle duplicate '
'dimensions: %r' % list(var_dims))
for d, s in zip(var_dims, var.shape):
if d not in all_dims:
all_dims[d] = s
elif all_dims[d] != s:
raise ValueError('operands cannot be broadcast together '
'with mismatched lengths for dimension %r: %s'
% (d, (all_dims[d], s)))
return all_dims
def _broadcast_compat_variables(*variables):
dims = tuple(_unified_dims(variables))
return tuple(var.set_dims(dims) if var.dims != dims else var
for var in variables)
def broadcast_variables(*variables):
"""Given any number of variables, return variables with matching dimensions
and broadcast data.
The data on the returned variables will be a view of the data on the
corresponding original arrays, but dimensions will be reordered and
inserted so that both broadcast arrays have the same dimensions. The new
dimensions are sorted in order of appearance in the first variable's
dimensions followed by the second variable's dimensions.
"""
dims_map = _unified_dims(variables)
dims_tuple = tuple(dims_map)
return tuple(var.set_dims(dims_map) if var.dims != dims_tuple else var
for var in variables)
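# Illustrative sketch (not part of the public API; assumes numpy is available
# as np in this module): broadcasting two variables with disjoint dimensions
# yields views that share the union of their dimensions, ordered by first
# appearance.
#
#     a = Variable(('x',), np.arange(3))
#     b = Variable(('y',), np.arange(4))
#     a2, b2 = broadcast_variables(a, b)
#     # a2.dims == b2.dims == ('x', 'y'); a2.shape == b2.shape == (3, 4)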
def _broadcast_compat_data(self, other):
if all(hasattr(other, attr) for attr
in ['dims', 'data', 'shape', 'encoding']):
# `other` satisfies the necessary Variable API for broadcast_variables
new_self, new_other = _broadcast_compat_variables(self, other)
self_data = new_self.data
other_data = new_other.data
dims = new_self.dims
else:
# rely on numpy broadcasting rules
self_data = self.data
other_data = other
dims = self.dims
return self_data, other_data, dims
def concat(variables, dim='concat_dim', positions=None, shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Array
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
shortcut : bool, optional
This option is used internally to speed-up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
"""
variables = list(variables)
if all(isinstance(v, IndexVariable) for v in variables):
return IndexVariable.concat(variables, dim, positions, shortcut)
else:
return Variable.concat(variables, dim, positions, shortcut)
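# Illustrative sketch: stacking two 1-D variables along a new leading
# dimension (the dimension name 'time' below is arbitrary).
#
#     vs = [Variable(('x',), np.arange(3)), Variable(('x',), np.arange(3))]
#     stacked = concat(vs, dim='time')
#     # stacked.dims == ('time', 'x'); stacked.shape == (2, 3)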
def assert_unique_multiindex_level_names(variables):
"""Check for uniqueness of MultiIndex level names in all given
variables.
Not public API. Used for checking consistency of DataArray and Dataset
objects.
"""
level_names = defaultdict(list)
for var_name, var in variables.items():
if isinstance(var._data, PandasIndexAdapter):
idx_level_names = var.to_index_variable().level_names
if idx_level_names is not None:
for n in idx_level_names:
level_names[n].append('%r (%s)' % (n, var_name))
for k, v in level_names.items():
if k in variables:
v.append('(%s)' % k)
duplicate_names = [v for v in level_names.values() if len(v) > 1]
if duplicate_names:
conflict_str = '\n'.join([', '.join(v) for v in duplicate_names])
raise ValueError('conflicting MultiIndex level name(s):\n%s'
% conflict_str)
|
jhamman/xray
|
xarray/core/variable.py
|
Python
|
apache-2.0
| 54,258
|
[
"NetCDF"
] |
8f26af5b7c80ae8ba16a2b43ffb7e067bc31ec2d9e96109828a5d5d5471d198a
|
#!/usr/bin/python
# Copyright (C) 2011 Tianyang Li
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
"""
Get the IDs of sequences that have alignments in the database.
"""
import sys
from HTSeq import SAM_Reader
def main(args):
seq_ids=set([])
for fin in args:
for align in SAM_Reader(fin):
if align.aligned:
seq_ids.add(align.iv.chrom)
for seq_id in seq_ids:
print seq_id
if __name__=='__main__':
main(sys.argv[1:])
|
tianyang-li/meta-transcriptome
|
get_sam_db_aligned.py
|
Python
|
gpl-3.0
| 986
|
[
"HTSeq"
] |
193b097dc34f9f32cefe13415f1a558cb0d1bb511f9d37b0e0d94e9acc08c726
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.codegen.protobuf.python.python_protobuf_subsystem import (
rules as protobuf_subsystem_rules,
)
from pants.backend.codegen.protobuf.python.rules import rules as protobuf_rules
from pants.backend.codegen.protobuf.target_types import ProtobufSourceTarget
from pants.backend.python import target_types_rules
from pants.backend.python.dependency_inference import rules as dependency_inference_rules
from pants.backend.python.target_types import (
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
PythonSourceTarget,
)
from pants.backend.python.typecheck.mypy.rules import (
MyPyFieldSet,
MyPyPartition,
MyPyPartitions,
MyPyRequest,
determine_python_files,
)
from pants.backend.python.typecheck.mypy.rules import rules as mypy_rules
from pants.backend.python.typecheck.mypy.subsystem import MyPy
from pants.backend.python.typecheck.mypy.subsystem import rules as mypy_subystem_rules
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.core.goals.check import CheckResult, CheckResults
from pants.core.util_rules import config_files
from pants.engine.addresses import Address
from pants.engine.fs import EMPTY_DIGEST, DigestContents
from pants.engine.rules import QueryRule
from pants.engine.target import Target
from pants.testutil.python_interpreter_selection import (
all_major_minor_python_versions,
skip_unless_python27_and_python3_present,
skip_unless_python27_present,
skip_unless_python38_present,
skip_unless_python39_present,
)
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*mypy_rules(),
*mypy_subystem_rules(),
*dependency_inference_rules.rules(), # Used for import inference.
*config_files.rules(),
*target_types_rules.rules(),
QueryRule(CheckResults, (MyPyRequest,)),
QueryRule(MyPyPartitions, (MyPyRequest,)),
],
target_types=[PythonSourcesGeneratorTarget, PythonRequirementTarget, PythonSourceTarget],
)
PACKAGE = "src/py/project"
GOOD_FILE = dedent(
"""\
def add(x: int, y: int) -> int:
return x + y
result = add(3, 3)
"""
)
BAD_FILE = dedent(
"""\
def add(x: int, y: int) -> int:
return x + y
result = add(2.0, 3.0)
"""
)
# This will fail if `--disallow-any-expr` is configured.
NEEDS_CONFIG_FILE = dedent(
"""\
from typing import Any, cast
x = cast(Any, "hello")
"""
)
def run_mypy(
rule_runner: RuleRunner, targets: list[Target], *, extra_args: list[str] | None = None
) -> tuple[CheckResult, ...]:
rule_runner.set_options(extra_args or (), env_inherit={"PATH", "PYENV_ROOT", "HOME"})
result = rule_runner.request(
CheckResults, [MyPyRequest(MyPyFieldSet.create(tgt) for tgt in targets)]
)
return result.results
def assert_success(
rule_runner: RuleRunner, target: Target, *, extra_args: list[str] | None = None
) -> None:
result = run_mypy(rule_runner, [target], extra_args=extra_args)
assert len(result) == 1
assert result[0].exit_code == 0
assert "Success: no issues found" in result[0].stdout.strip()
assert result[0].report == EMPTY_DIGEST
@pytest.mark.platform_specific_behavior
@pytest.mark.parametrize(
"major_minor_interpreter",
all_major_minor_python_versions(MyPy.default_interpreter_constraints),
)
def test_passing(rule_runner: RuleRunner, major_minor_interpreter: str) -> None:
rule_runner.write_files({f"{PACKAGE}/f.py": GOOD_FILE, f"{PACKAGE}/BUILD": "python_sources()"})
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="f.py"))
assert_success(
rule_runner,
tgt,
extra_args=[f"--mypy-interpreter-constraints=['=={major_minor_interpreter}.*']"],
)
def test_failing(rule_runner: RuleRunner) -> None:
rule_runner.write_files({f"{PACKAGE}/f.py": BAD_FILE, f"{PACKAGE}/BUILD": "python_sources()"})
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="f.py"))
result = run_mypy(rule_runner, [tgt])
assert len(result) == 1
assert result[0].exit_code == 1
assert f"{PACKAGE}/f.py:4" in result[0].stdout
assert result[0].report == EMPTY_DIGEST
def test_multiple_targets(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
f"{PACKAGE}/good.py": GOOD_FILE,
f"{PACKAGE}/bad.py": BAD_FILE,
f"{PACKAGE}/BUILD": "python_sources()",
}
)
tgts = [
rule_runner.get_target(Address(PACKAGE, relative_file_path="good.py")),
rule_runner.get_target(Address(PACKAGE, relative_file_path="bad.py")),
]
result = run_mypy(rule_runner, tgts)
assert len(result) == 1
assert result[0].exit_code == 1
assert f"{PACKAGE}/good.py" not in result[0].stdout
assert f"{PACKAGE}/bad.py:4" in result[0].stdout
assert "checked 2 source files" in result[0].stdout
assert result[0].report == EMPTY_DIGEST
@pytest.mark.parametrize(
"config_path,extra_args",
([".mypy.ini", []], ["custom_config.ini", ["--mypy-config=custom_config.ini"]]),
)
def test_config_file(rule_runner: RuleRunner, config_path: str, extra_args: list[str]) -> None:
rule_runner.write_files(
{
f"{PACKAGE}/f.py": NEEDS_CONFIG_FILE,
f"{PACKAGE}/BUILD": "python_sources()",
config_path: "[mypy]\ndisallow_any_expr = True\n",
}
)
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="f.py"))
result = run_mypy(rule_runner, [tgt], extra_args=extra_args)
assert len(result) == 1
assert result[0].exit_code == 1
assert f"{PACKAGE}/f.py:3" in result[0].stdout
def test_passthrough_args(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{f"{PACKAGE}/f.py": NEEDS_CONFIG_FILE, f"{PACKAGE}/BUILD": "python_sources()"}
)
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="f.py"))
result = run_mypy(rule_runner, [tgt], extra_args=["--mypy-args='--disallow-any-expr'"])
assert len(result) == 1
assert result[0].exit_code == 1
assert f"{PACKAGE}/f.py:3" in result[0].stdout
def test_skip(rule_runner: RuleRunner) -> None:
rule_runner.write_files({f"{PACKAGE}/f.py": BAD_FILE, f"{PACKAGE}/BUILD": "python_sources()"})
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="f.py"))
result = run_mypy(rule_runner, [tgt], extra_args=["--mypy-skip"])
assert not result
def test_report_file(rule_runner: RuleRunner) -> None:
rule_runner.write_files({f"{PACKAGE}/f.py": GOOD_FILE, f"{PACKAGE}/BUILD": "python_sources()"})
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="f.py"))
result = run_mypy(rule_runner, [tgt], extra_args=["--mypy-args='--linecount-report=reports'"])
assert len(result) == 1
assert result[0].exit_code == 0
assert "Success: no issues found" in result[0].stdout.strip()
report_files = rule_runner.request(DigestContents, [result[0].report])
assert len(report_files) == 1
assert "4 4 1 1 f" in report_files[0].content.decode()
def test_thirdparty_dependency(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"BUILD": (
"python_requirement(name='more-itertools', requirements=['more-itertools==8.4.0'])"
),
f"{PACKAGE}/f.py": dedent(
"""\
from more_itertools import flatten
assert flatten(42) == [4, 2]
"""
),
f"{PACKAGE}/BUILD": "python_sources()",
}
)
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="f.py"))
result = run_mypy(rule_runner, [tgt])
assert len(result) == 1
assert result[0].exit_code == 1
assert f"{PACKAGE}/f.py:3" in result[0].stdout
def test_thirdparty_plugin(rule_runner: RuleRunner) -> None:
# NB: We install `django-stubs` both with `[mypy].extra_requirements` and
    # `[mypy].extra_type_stubs`. This awkwardness is because it's used both as a plugin and
    # as type stubs.
rule_runner.write_files(
{
f"{PACKAGE}/settings.py": dedent(
"""\
from django.urls import URLPattern
DEBUG = True
DEFAULT_FROM_EMAIL = "webmaster@example.com"
SECRET_KEY = "not so secret"
MY_SETTING = URLPattern(pattern="foo", callback=lambda: None)
"""
),
f"{PACKAGE}/app.py": dedent(
"""\
from django.utils import text
assert "forty-two" == text.slugify("forty two")
assert "42" == text.slugify(42)
"""
),
f"{PACKAGE}/BUILD": "python_sources()",
"mypy.ini": dedent(
"""\
[mypy]
plugins =
mypy_django_plugin.main
[mypy.plugins.django-stubs]
django_settings_module = project.settings
"""
),
}
)
result = run_mypy(
rule_runner,
[
rule_runner.get_target(Address(PACKAGE, relative_file_path="app.py")),
rule_runner.get_target(Address(PACKAGE, relative_file_path="settings.py")),
],
extra_args=[
"--mypy-extra-requirements=django-stubs==1.8.0",
"--mypy-extra-type-stubs=django-stubs==1.8.0",
"--mypy-version=mypy==0.812",
"--mypy-lockfile=<none>",
],
)
assert len(result) == 1
assert result[0].exit_code == 1
assert f"{PACKAGE}/app.py:4" in result[0].stdout
def test_transitive_dependencies(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
f"{PACKAGE}/util/__init__.py": "",
f"{PACKAGE}/util/lib.py": dedent(
"""\
def capitalize(v: str) -> str:
return v.capitalize()
"""
),
f"{PACKAGE}/util/BUILD": "python_sources()",
f"{PACKAGE}/math/__init__.py": "",
f"{PACKAGE}/math/add.py": dedent(
"""\
from project.util.lib import capitalize
def add(x: int, y: int) -> str:
sum = x + y
return capitalize(sum) # This is the wrong type.
"""
),
f"{PACKAGE}/math/BUILD": "python_sources()",
f"{PACKAGE}/__init__.py": "",
f"{PACKAGE}/app.py": dedent(
"""\
from project.math.add import add
print(add(2, 4))
"""
),
f"{PACKAGE}/BUILD": "python_sources()",
}
)
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="app.py"))
result = run_mypy(rule_runner, [tgt])
assert len(result) == 1
assert result[0].exit_code == 1
assert f"{PACKAGE}/math/add.py:5" in result[0].stdout
@skip_unless_python27_present
def test_works_with_python27(rule_runner: RuleRunner) -> None:
"""A regression test that we can properly handle Python 2-only third-party dependencies.
There was a bug that this would cause the runner PEX to fail to execute because it did not have
Python 3 distributions of the requirements.
Also note that this Python 2 support should be automatic: Pants will tell MyPy to run with
`--py2` by detecting its use in interpreter constraints.
"""
rule_runner.write_files(
{
"BUILD": dedent(
"""\
# Both requirements are a) typed and b) compatible with Py2 and Py3. However, `x690`
# has a distinct wheel for Py2 vs. Py3, whereas libumi has a universal wheel. We expect
# both to be usable, even though libumi is not compatible with Py3.
python_requirement(
name="libumi",
requirements=["libumi==0.0.2"],
)
python_requirement(
name="x690",
requirements=["x690==0.2.0"],
)
"""
),
f"{PACKAGE}/f.py": dedent(
"""\
from libumi import hello_world
from x690 import types
print "Blast from the past!"
print hello_world() - 21 # MyPy should fail. You can't subtract an `int` from `bytes`.
"""
),
f"{PACKAGE}/BUILD": "python_sources(interpreter_constraints=['==2.7.*'])",
}
)
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="f.py"))
result = run_mypy(rule_runner, [tgt])
assert len(result) == 1
assert result[0].exit_code == 1
assert f"{PACKAGE}/f.py:5: error: Unsupported operand types" in result[0].stdout
# Confirm original issues not showing up.
assert "Failed to execute PEX file" not in result[0].stderr
assert (
"Cannot find implementation or library stub for module named 'x690'" not in result[0].stdout
)
assert (
"Cannot find implementation or library stub for module named 'libumi'"
not in result[0].stdout
)
@skip_unless_python38_present
def test_works_with_python38(rule_runner: RuleRunner) -> None:
"""MyPy's typed-ast dependency does not understand Python 3.8, so we must instead run MyPy with
Python 3.8 when relevant."""
rule_runner.write_files(
{
f"{PACKAGE}/f.py": dedent(
"""\
x = 0
if y := x:
print("x is truthy and now assigned to y")
"""
),
f"{PACKAGE}/BUILD": "python_sources(interpreter_constraints=['>=3.8'])",
}
)
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="f.py"))
assert_success(rule_runner, tgt)
@skip_unless_python39_present
def test_works_with_python39(rule_runner: RuleRunner) -> None:
"""MyPy's typed-ast dependency does not understand Python 3.9, so we must instead run MyPy with
Python 3.9 when relevant."""
rule_runner.write_files(
{
f"{PACKAGE}/f.py": dedent(
"""\
@lambda _: int
def replaced(x: bool) -> str:
return "42" if x is True else "1/137"
"""
),
f"{PACKAGE}/BUILD": "python_sources(interpreter_constraints=['>=3.9'])",
}
)
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="f.py"))
assert_success(rule_runner, tgt)
@skip_unless_python27_and_python3_present
def test_uses_correct_python_version(rule_runner: RuleRunner) -> None:
"""We set `--python-version` automatically for the user, and also batch based on interpreter
constraints.
This batching must consider transitive dependencies, so we use a more complex setup where the
dependencies are what have specific constraints that influence the batching.
"""
rule_runner.write_files(
{
f"{PACKAGE}/py2/__init__.py": dedent(
"""\
def add(x, y):
# type: (int, int) -> int
return x + y
"""
),
f"{PACKAGE}/py2/BUILD": "python_sources(interpreter_constraints=['==2.7.*'])",
f"{PACKAGE}/py3/__init__.py": dedent(
"""\
def add(x: int, y: int) -> int:
return x + y
"""
),
f"{PACKAGE}/py3/BUILD": "python_sources(interpreter_constraints=['>=3.6'])",
f"{PACKAGE}/__init__.py": "",
f"{PACKAGE}/uses_py2.py": "from project.py2 import add\nassert add(2, 2) == 4\n",
f"{PACKAGE}/uses_py3.py": "from project.py3 import add\nassert add(2, 2) == 4\n",
f"{PACKAGE}/BUILD": "python_sources(interpreter_constraints=['==2.7.*', '>=3.6'])",
}
)
py2_tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="uses_py2.py"))
py3_tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="uses_py3.py"))
result = run_mypy(rule_runner, [py2_tgt, py3_tgt])
assert len(result) == 2
py2_result, py3_result = sorted(result, key=lambda res: res.partition_description or "")
assert py2_result.exit_code == 0
assert py2_result.partition_description == "['CPython==2.7.*', 'CPython==2.7.*,>=3.6']"
assert "Success: no issues found" in py2_result.stdout
assert py3_result.exit_code == 0
assert py3_result.partition_description == "['CPython==2.7.*,>=3.6', 'CPython>=3.6']"
assert "Success: no issues found" in py3_result.stdout
def test_run_only_on_specified_files(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
f"{PACKAGE}/good.py": GOOD_FILE,
f"{PACKAGE}/bad.py": BAD_FILE,
f"{PACKAGE}/BUILD": dedent(
"""\
python_sources(name='good', sources=['good.py'], dependencies=[':bad'])
python_sources(name='bad', sources=['bad.py'])
"""
),
}
)
tgt = rule_runner.get_target(Address(PACKAGE, target_name="good", relative_file_path="good.py"))
assert_success(rule_runner, tgt)
def test_type_stubs(rule_runner: RuleRunner) -> None:
"""Test that first-party type stubs work for both first-party and third-party code."""
rule_runner.write_files(
{
"BUILD": "python_requirement(name='colors', requirements=['ansicolors'])",
"mypy_stubs/__init__.py": "",
"mypy_stubs/colors.pyi": "def red(s: str) -> str: ...",
"mypy_stubs/BUILD": "python_sources()",
f"{PACKAGE}/util/__init__.py": "",
f"{PACKAGE}/util/untyped.py": "def add(x, y):\n return x + y",
f"{PACKAGE}/util/untyped.pyi": "def add(x: int, y: int) -> int: ...",
f"{PACKAGE}/util/BUILD": "python_sources()",
f"{PACKAGE}/__init__.py": "",
f"{PACKAGE}/app.py": dedent(
"""\
from colors import red
from project.util.untyped import add
z = add(2, 2.0)
print(red(z))
"""
),
f"{PACKAGE}/BUILD": "python_sources()",
}
)
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="app.py"))
result = run_mypy(
rule_runner, [tgt], extra_args=["--source-root-patterns=['mypy_stubs', 'src/py']"]
)
assert len(result) == 1
assert result[0].exit_code == 1
assert f"{PACKAGE}/app.py:4: error: Argument 2 to" in result[0].stdout
assert f"{PACKAGE}/app.py:5: error: Argument 1 to" in result[0].stdout
def test_mypy_shadows_requirements(rule_runner: RuleRunner) -> None:
"""Test the behavior of a MyPy requirement shadowing a user's requirement.
The way we load requirements is complex. We want to ensure that things still work properly in
this edge case.
"""
rule_runner.write_files(
{
"BUILD": "python_requirement(name='ta', requirements=['typed-ast==1.4.1'])",
f"{PACKAGE}/f.py": "import typed_ast",
f"{PACKAGE}/BUILD": "python_sources()",
}
)
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="f.py"))
assert_success(
rule_runner, tgt, extra_args=["--mypy-version=mypy==0.782", "--mypy-lockfile=<none>"]
)
def test_source_plugin(rule_runner: RuleRunner) -> None:
# NB: We make this source plugin fairly complex by having it use transitive dependencies.
# This is to ensure that we can correctly support plugins with dependencies.
# The plugin changes the return type of functions ending in `__overridden_by_plugin` to have a
# return type of `None`.
plugin_file = dedent(
"""\
from typing import Callable, Optional, Type
from mypy.plugin import FunctionContext, Plugin
from mypy.types import NoneType, Type as MyPyType
from plugins.subdir.dep import is_overridable_function
from project.subdir.util import noop
noop()
class ChangeReturnTypePlugin(Plugin):
def get_function_hook(
self, fullname: str
) -> Optional[Callable[[FunctionContext], MyPyType]]:
return hook if is_overridable_function(fullname) else None
def hook(ctx: FunctionContext) -> MyPyType:
return NoneType()
def plugin(_version: str) -> Type[Plugin]:
return ChangeReturnTypePlugin
"""
)
rule_runner.write_files(
{
"BUILD": dedent(
f"""\
python_requirement(name='mypy', requirements=['{MyPy.default_version}'])
python_requirement(name="more-itertools", requirements=["more-itertools==8.4.0"])
"""
),
"pants-plugins/plugins/subdir/__init__.py": "",
"pants-plugins/plugins/subdir/dep.py": dedent(
"""\
from more_itertools import flatten
def is_overridable_function(name: str) -> bool:
assert list(flatten([[1, 2], [3, 4]])) == [1, 2, 3, 4]
return name.endswith("__overridden_by_plugin")
"""
),
"pants-plugins/plugins/subdir/BUILD": "python_sources()",
# The plugin can depend on code located anywhere in the project; its dependencies need
# not be in the same directory.
f"{PACKAGE}/subdir/__init__.py": "",
f"{PACKAGE}/subdir/util.py": "def noop() -> None:\n pass\n",
f"{PACKAGE}/subdir/BUILD": "python_sources()",
"pants-plugins/plugins/__init__.py": "",
"pants-plugins/plugins/change_return_type.py": plugin_file,
"pants-plugins/plugins/BUILD": "python_sources()",
f"{PACKAGE}/__init__.py": "",
f"{PACKAGE}/f.py": dedent(
"""\
def add(x: int, y: int) -> int:
return x + y
def add__overridden_by_plugin(x: int, y: int) -> int:
return x + y
result = add__overridden_by_plugin(1, 1)
assert add(result, 2) == 4
"""
),
f"{PACKAGE}/BUILD": "python_sources()",
"mypy.ini": dedent(
"""\
[mypy]
plugins =
plugins.change_return_type
"""
),
}
)
def run_mypy_with_plugin(tgt: Target) -> CheckResult:
result = run_mypy(
rule_runner,
[tgt],
extra_args=[
"--mypy-source-plugins=['pants-plugins/plugins']",
"--mypy-lockfile=<none>",
"--source-root-patterns=['pants-plugins', 'src/py']",
],
)
assert len(result) == 1
return result[0]
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="f.py"))
result = run_mypy_with_plugin(tgt)
assert result.exit_code == 1
assert f"{PACKAGE}/f.py:8" in result.stdout
# Ensure we don't accidentally check the source plugin itself.
assert "(checked 1 source file)" in result.stdout
# Ensure that running MyPy on the plugin itself still works.
plugin_tgt = rule_runner.get_target(
Address("pants-plugins/plugins", relative_file_path="change_return_type.py")
)
result = run_mypy_with_plugin(plugin_tgt)
assert result.exit_code == 0
assert "Success: no issues found in 1 source file" in result.stdout
def test_protobuf_mypy(rule_runner: RuleRunner) -> None:
rule_runner = RuleRunner(
rules=[*rule_runner.rules, *protobuf_rules(), *protobuf_subsystem_rules()],
target_types=[*rule_runner.target_types, ProtobufSourceTarget],
)
rule_runner.write_files(
{
"BUILD": ("python_requirement(name='protobuf', requirements=['protobuf==3.13.0'])"),
f"{PACKAGE}/__init__.py": "",
f"{PACKAGE}/proto.proto": dedent(
"""\
syntax = "proto3";
package project;
message Person {
string name = 1;
int32 id = 2;
string email = 3;
}
"""
),
f"{PACKAGE}/f.py": dedent(
"""\
from project.proto_pb2 import Person
x = Person(name=123, id="abc", email=None)
"""
),
f"{PACKAGE}/BUILD": dedent(
"""\
python_sources(dependencies=[':proto'])
protobuf_source(name='proto', source='proto.proto')
"""
),
}
)
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="f.py"))
result = run_mypy(
rule_runner,
[tgt],
extra_args=["--python-protobuf-mypy-plugin"],
)
assert len(result) == 1
assert 'Argument "name" to "Person" has incompatible type "int"' in result[0].stdout
assert 'Argument "id" to "Person" has incompatible type "str"' in result[0].stdout
assert result[0].exit_code == 1
def test_partition_targets(rule_runner: RuleRunner) -> None:
def create_folder(folder: str, resolve: str, interpreter: str) -> dict[str, str]:
return {
f"{folder}/dep.py": "",
f"{folder}/root.py": "",
f"{folder}/BUILD": dedent(
f"""\
python_source(
name='dep',
source='dep.py',
resolve='{resolve}',
interpreter_constraints=['=={interpreter}.*'],
)
python_source(
name='root',
source='root.py',
resolve='{resolve}',
interpreter_constraints=['=={interpreter}.*'],
dependencies=[':dep'],
)
"""
),
}
files = {
**create_folder("resolveA_py38", "a", "3.8"),
**create_folder("resolveA_py39", "a", "3.9"),
**create_folder("resolveB_1", "b", "3.9"),
**create_folder("resolveB_2", "b", "3.9"),
}
rule_runner.write_files(files) # type: ignore[arg-type]
rule_runner.set_options(
["--python-resolves={'a': '', 'b': ''}", "--python-enable-resolves"],
env_inherit={"PATH", "PYENV_ROOT", "HOME"},
)
resolve_a_py38_dep = rule_runner.get_target(Address("resolveA_py38", target_name="dep"))
resolve_a_py38_root = rule_runner.get_target(Address("resolveA_py38", target_name="root"))
resolve_a_py39_dep = rule_runner.get_target(Address("resolveA_py39", target_name="dep"))
resolve_a_py39_root = rule_runner.get_target(Address("resolveA_py39", target_name="root"))
resolve_b_dep1 = rule_runner.get_target(Address("resolveB_1", target_name="dep"))
resolve_b_root1 = rule_runner.get_target(Address("resolveB_1", target_name="root"))
resolve_b_dep2 = rule_runner.get_target(Address("resolveB_2", target_name="dep"))
resolve_b_root2 = rule_runner.get_target(Address("resolveB_2", target_name="root"))
request = MyPyRequest(
MyPyFieldSet.create(t)
for t in (
resolve_a_py38_root,
resolve_a_py39_root,
resolve_b_root1,
resolve_b_root2,
)
)
partitions = rule_runner.request(MyPyPartitions, [request])
assert len(partitions) == 3
def assert_partition(
partition: MyPyPartition, roots: list[Target], deps: list[Target], interpreter: str
) -> None:
root_addresses = {t.address for t in roots}
assert {t.address for t in partition.root_targets} == root_addresses
assert {t.address for t in partition.closure} == {
*root_addresses,
*(t.address for t in deps),
}
assert partition.interpreter_constraints == InterpreterConstraints([f"=={interpreter}.*"])
assert_partition(partitions[0], [resolve_a_py38_root], [resolve_a_py38_dep], "3.8")
assert_partition(partitions[1], [resolve_a_py39_root], [resolve_a_py39_dep], "3.9")
assert_partition(
partitions[2], [resolve_b_root1, resolve_b_root2], [resolve_b_dep1, resolve_b_dep2], "3.9"
)
def test_determine_python_files() -> None:
assert determine_python_files([]) == ()
assert determine_python_files(["f.py"]) == ("f.py",)
assert determine_python_files(["f.pyi"]) == ("f.pyi",)
assert determine_python_files(["f.py", "f.pyi"]) == ("f.pyi",)
assert determine_python_files(["f.pyi", "f.py"]) == ("f.pyi",)
assert determine_python_files(["f.json"]) == ()
|
pantsbuild/pants
|
src/python/pants/backend/python/typecheck/mypy/rules_integration_test.py
|
Python
|
apache-2.0
| 29,373
|
[
"BLAST"
] |
356bbec93c078f7cb1c01ff1a96b81a5607ca5e4212cf906a59f9910901f7551
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the :mod:`iris.tests.stock.netcdf` module."""
import shutil
import tempfile
from iris import load_cube
from iris.experimental.ugrid.load import PARSE_UGRID_ON_LOAD
from iris.experimental.ugrid.mesh import Mesh, MeshCoord
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from iris.tests.stock import netcdf
class XIOSFileMixin(tests.IrisTest):
@classmethod
def setUpClass(cls):
# Create a temp directory for transient test files.
cls.temp_dir = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
# Destroy the temp directory.
shutil.rmtree(cls.temp_dir)
def create_synthetic_file(self, **create_kwargs):
# Should be overridden to invoke one of the create_file_ functions.
# E.g.
# return netcdf.create_file__xios_2d_face_half_levels(
# temp_file_dir=self.temp_dir, dataset_name="mesh", **create_kwargs
# )
raise NotImplementedError
def create_synthetic_test_cube(self, **create_kwargs):
file_path = self.create_synthetic_file(**create_kwargs)
with PARSE_UGRID_ON_LOAD.context():
cube = load_cube(file_path)
return cube
def check_cube(self, cube, shape, location, level):
# Basic checks on the primary data cube.
self.assertEqual(cube.var_name, "thing")
self.assertEqual(cube.long_name, "thingness")
self.assertEqual(cube.shape, shape)
# Also a few checks on the attached mesh-related information.
last_dim = cube.ndim - 1
self.assertIsInstance(cube.mesh, Mesh)
self.assertEqual(cube.mesh_dim(), last_dim)
self.assertEqual(cube.location, location)
for coord_name in ("longitude", "latitude"):
coord = cube.coord(coord_name)
self.assertIsInstance(coord, MeshCoord)
self.assertEqual(coord.shape, (shape[last_dim],))
self.assertTrue(cube.mesh.var_name.endswith(f"{level}_levels"))
class Test_create_file__xios_2d_face_half_levels(XIOSFileMixin):
def create_synthetic_file(self, **create_kwargs):
return netcdf.create_file__xios_2d_face_half_levels(
temp_file_dir=self.temp_dir, dataset_name="mesh", **create_kwargs
)
def test_basic_load(self):
cube = self.create_synthetic_test_cube()
self.check_cube(cube, shape=(1, 866), location="face", level="half")
def test_scale_mesh(self):
cube = self.create_synthetic_test_cube(n_faces=10)
self.check_cube(cube, shape=(1, 10), location="face", level="half")
def test_scale_time(self):
cube = self.create_synthetic_test_cube(n_times=3)
self.check_cube(cube, shape=(3, 866), location="face", level="half")
class Test_create_file__xios_3d_face_half_levels(XIOSFileMixin):
def create_synthetic_file(self, **create_kwargs):
return netcdf.create_file__xios_3d_face_half_levels(
temp_file_dir=self.temp_dir, dataset_name="mesh", **create_kwargs
)
def test_basic_load(self):
cube = self.create_synthetic_test_cube()
self.check_cube(
cube, shape=(1, 38, 866), location="face", level="half"
)
def test_scale_mesh(self):
cube = self.create_synthetic_test_cube(n_faces=10)
self.check_cube(cube, shape=(1, 38, 10), location="face", level="half")
def test_scale_time(self):
cube = self.create_synthetic_test_cube(n_times=3)
self.check_cube(
cube, shape=(3, 38, 866), location="face", level="half"
)
def test_scale_levels(self):
cube = self.create_synthetic_test_cube(n_levels=10)
self.check_cube(
cube, shape=(1, 10, 866), location="face", level="half"
)
class Test_create_file__xios_3d_face_full_levels(XIOSFileMixin):
def create_synthetic_file(self, **create_kwargs):
return netcdf.create_file__xios_3d_face_full_levels(
temp_file_dir=self.temp_dir, dataset_name="mesh", **create_kwargs
)
def test_basic_load(self):
cube = self.create_synthetic_test_cube()
self.check_cube(
cube, shape=(1, 39, 866), location="face", level="full"
)
def test_scale_mesh(self):
cube = self.create_synthetic_test_cube(n_faces=10)
self.check_cube(cube, shape=(1, 39, 10), location="face", level="full")
def test_scale_time(self):
cube = self.create_synthetic_test_cube(n_times=3)
self.check_cube(
cube, shape=(3, 39, 866), location="face", level="full"
)
def test_scale_levels(self):
cube = self.create_synthetic_test_cube(n_levels=10)
self.check_cube(
cube, shape=(1, 10, 866), location="face", level="full"
)
if __name__ == "__main__":
tests.main()
|
bjlittle/iris
|
lib/iris/tests/unit/tests/stock/test_netcdf.py
|
Python
|
lgpl-3.0
| 5,089
|
[
"NetCDF"
] |
b0fdb79b34aeb979488de63a256cb20bab7687b1500490d0332e0adfedaa6713
|
#from opengmcore import _opengmcore.adder as adder
from opengmcore import *
from __version__ import version
from functionhelper import *
from _inf_param import _MetaInfParam , InfParam
from _visu import visualizeGm
from _misc import defaultAccumulator
from __version__ import version
import time
from _inference_interface_generator import _inject_interface , InferenceBase
import inference
import hdf5
import benchmark
# initialize solver/ inference dictionaries
_solverDicts=[
(inference.adder.minimizer.solver.__dict__ , 'adder', 'minimizer' ),
(inference.adder.maximizer.solver.__dict__, 'adder', 'maximizer' ),
(inference.multiplier.integrator.solver.__dict__,'adder', 'integrator'),
(inference.multiplier.minimizer.solver.__dict__, 'multiplier', 'minimizer' ),
(inference.multiplier.maximizer.solver.__dict__, 'multiplier', 'maximizer' ),
(inference.multiplier.integrator.solver.__dict__,'multiplier', 'integrator')
]
for infClass,infName in _inject_interface(_solverDicts):
inference.__dict__[infName]=infClass
class Timer(object):
def __init__(self, name=None):
self.name = name
def __enter__(self):
if self.name:
print '[%s]' % self.name
self.tstart = time.time()
def __exit__(self, type, value, traceback):
#if self.name:
# print '[%s]' % self.name,
print ' Elapsed: %s' % (time.time() - self.tstart)
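# Usage sketch (Python 2 syntax, matching the rest of this module): time an
# arbitrary block of work with the Timer context manager.
#
#     with Timer('inference'):
#         inf.infer()
#     # prints '[inference]' on entry and ' Elapsed: <seconds>' on exit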
def saveGm(gm,f,d='gm'):
""" save a graphical model to a hdf5 file:
Args:
gm : graphical model to save
f : filepath
g : dataset (defaut : 'gm')
"""
hdf5.saveGraphicalModel(f,d)
def loadGm(f,d='gm',operator='adder'):
""" save a graphical model to a hdf5 file:
Args:
f : filepath
g : dataset (defaut : 'gm')
operator : operator of the graphical model ('adder' / 'multiplier')
"""
if(operator=='adder'):
gm=adder.GraphicalModel()
elif(operator=='multiplier'):
gm=multiplier.GraphicalModel()
else:
raise RuntimeError("unknown operator: "+ operator)
hdf5.loadGraphicalModel(gm,f,d)
return gm
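# Round-trip sketch (file path and dataset name are examples only):
#     saveGm(gm, '/tmp/model.h5', 'gm')
#     gm2 = loadGm('/tmp/model.h5', 'gm', operator='adder')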
class TestModels(object):
@staticmethod
def chain3(nVar,nLabels):
model=adder.GraphicalModel([nLabels]*nVar)
unaries = numpy.random.rand(nVar,nLabels)
model.addFactors(model.addFunctions(unaries),numpy.arange(nVar))
numpy.random.seed(42)
for x0 in range(nVar-2):
f=numpy.random.rand(nLabels,nLabels,nLabels)
model.addFactor(model.addFunction(f),[x0,x0+1,x0+2])
return model
@staticmethod
def chain4(nVar,nLabels):
model=adder.GraphicalModel([nLabels]*nVar)
unaries = numpy.random.rand(nVar,nLabels)
model.addFactors(model.addFunctions(unaries),numpy.arange(nVar))
numpy.random.seed(42)
for x0 in range(nVar-3):
f=numpy.random.rand(nLabels,nLabels,nLabels,nLabels)
model.addFactor(model.addFunction(f),[x0,x0+1,x0+2,x0+3])
return model
@staticmethod
def chainN(nVar,nLabels,order,nSpecialUnaries=0,beta=1.0):
model=adder.GraphicalModel([nLabels]*nVar)
unaries = numpy.random.rand(nVar,nLabels)
for sn in range(nSpecialUnaries):
r=int(numpy.random.rand(1)*nVar-1)
rl=int(numpy.random.rand(1)*nLabels-1)
unaries[r,rl]=0.0
model.addFactors(model.addFunctions(unaries),numpy.arange(nVar))
numpy.random.seed(42)
for x0 in range(nVar-(order-1)):
f=numpy.random.rand( *([nLabels]*order))
f*=beta
vis=numpy.arange(order)
vis+=x0
model.addFactor(model.addFunction(f),vis)
return model
@staticmethod
def secondOrderGrid(dx,dy,nLabels):
nVar=dx*dy
model=adder.GraphicalModel([nLabels]*nVar)
unaries = numpy.random.rand(nVar,nLabels)
model.addFactors(model.addFunctions(unaries),numpy.arange(nVar))
vis2Order=secondOrderGridVis(dx,dy,True)
nF2=len(vis2Order)#.shape[0]
f2s=numpy.random.rand(nF2,nLabels)
model.addFactors(model.addFunctions(f2s),vis2Order)
return model
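# Usage sketch: build small synthetic models for testing or benchmarking.
#     gm = TestModels.secondOrderGrid(10, 10, 4)  # 10x10 grid, 4 labels
#     gm2 = TestModels.chainN(50, 3, 3)           # third-order chain, 3 labels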
class GenericTimingVisitor(object):
def __init__(self,visitNth=1,reserve=0,verbose=True,multiline=True):
self.visitNth=visitNth
self.reserve=reserve
self.verbose=verbose
self.multiline=multiline
self.values_ = None
self.runtimes_ = None
self.bounds_ = None
self.iterations_ = None
self.t0 = None
self.t1 = None
self.iterNr = 0
def getValues(self):
return numpy.require(self.values_,dtype=value_type)
def getTimes(self):
return numpy.require(self.runtimes_,dtype=value_type)
def getBounds(self):
return numpy.require(self.bounds_,dtype=value_type)
def getIterations(self):
return numpy.require(self.iterations_,dtype=value_type)
def begin(self,inf):
v = inf.value()
b = inf.bound()
self.values_ =[v]
self.bounds_ =[b]
self.runtimes_ =[0.0]
self.iterations_=[self.iterNr]
if self.verbose :
print 'Begin : %d Value : %f Bound : %f '%(self.iterNr,v,b)
# start the timing
self.t0 =time.time()
self.t1 =time.time()
def visit(self,inf):
if(self.iterNr==0 or self.iterNr%self.visitNth==0):
# "stop the timing"
self.t1=time.time()
# get the runtime of the run
rt=self.t1-self.t0
v = inf.value()
b = inf.bound()
if self.verbose :
print 'Step : %d Value : %f Bound : %f '%(self.iterNr,v,b)
# store results
self.values_.append(v)
self.bounds_.append(b)
self.runtimes_.append(rt)
self.iterations_.append(self.iterNr)
# increment iteration number
self.iterNr+=1
# restart the timing
self.t0=time.time()
else:
# increment iteration number
self.iterNr+=1
def end(self,inf):
# "stop the timing"
self.t1=time.time()
# get the runtime of the run
rt=self.t1-self.t0
v = inf.value()
b = inf.bound()
if self.verbose :
print 'End : %d Value : %f Bound : %f '%(self.iterNr,v,b)
# store results
self.values_.append(v)
self.bounds_.append(b)
self.runtimes_.append(rt)
self.iterations_.append(self.iterNr)
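# Usage sketch (solver construction is a placeholder): solvers exposed through
# this module's inference interface provide a timingVisitor() factory, and the
# recorded values/bounds/runtimes can be read back after inference.
#
#     visitor = solver.timingVisitor(visitNth=10, verbose=False)
#     solver.infer(visitor)
#     values, times = visitor.getValues(), visitor.getTimes()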
class __ChainedInf__(object):
def __init__(self,gm,accumulator=None,parameter=InfParam()):
print "fresh constructor "
if accumulator is None:
self.accumulator=defaultAccumulator(gm=gm)
else:
self.accumulator=accumulator
kwargs=parameter.kwargs
self.gm_=gm
self.solverList = kwargs.get('solvers', [])
self.parameterList = kwargs.get('parameters', [])
self.arg_ = numpy.zeros(gm.numberOfVariables,dtype=numpy.uint64)
def timingVisitor(self,visitNth=1,reserve=0,verbose=True,multiline=True):
return GenericTimingVisitor(visitNth,reserve,verbose,multiline)
def infer(self,visitor=None):
print "CINNNNF"
for index,(cls,infParm) in enumerate(zip(self.solverList,self.parameterList)):
print "construct solver"
solver=cls(gm=self.gm_,accumulator=self.accumulator,parameter=infParm)
print "inference"
solverTv=solver.timingVisitor(verbose=True,visitNth=100)
if(index>0):
solver.setStartingPoint(self.arg_)
solver.infer(solverTv)
self.arg_=solver.arg()
if(index==0):
print "first solver"
visitor.values_ =solverTv.getValues()
visitor.runtimes_ =solverTv.getTimes()
visitor.bounds_ =solverTv.getBounds()
visitor.iterations_ =solverTv.getIterations()
else:
print "NOOOOOT first solver"
assert visitor.runtimes_ is not None
visitor.values_ =numpy.append(visitor.values_, solverTv.getValues())
visitor.runtimes_ =numpy.append(visitor.runtimes_, solverTv.getTimes())
visitor.bounds_ =numpy.append(visitor.bounds_, solverTv.getBounds())
visitor.iterations_ =numpy.append(visitor.iterations_, solverTv.getIterations())
assert visitor.runtimes_ is not None
print "CINNNNF DOOOOONE"
print "da rt",visitor.runtimes_[0]
def arg(self):
return self.arg_
def value(self):
return self.gm_.evaluate(self.arg_)
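# Usage sketch (solver classes are placeholders): chain two solvers so that
# the second one is warm-started from the first one's labeling.
#     param = InfParam(solvers=[SolverA, SolverB], parameters=[InfParam(), InfParam()])
#     chained = inference.ChainedInf(gm, parameter=param)
#     chained.infer(chained.timingVisitor())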
class __RandomFusion__(object):
def __init__(self,gm,accumulator=None,parameter=InfParam()):
if accumulator is None:
self.accumulator=defaultAccumulator(gm=gm)
else:
self.accumulator=accumulator
kwargs=parameter.kwargs
self.gm_=gm
self.steps = kwargs.get('steps', 100)
    self.fusionSolver = kwargs.get('fusionSolver', 'lf2')
self.arg_ = None
self.value_ = None
self.fusionMover=inference.adder.minimizer.FusionMover(self.gm_)
self.nLabels = self.gm_.numberOfLabels(0)
self.nVar = self.gm_.numberOfVariables
def timingVisitor(self,visitNth=1,reserve=0,verbose=True,multiline=True):
return GenericTimingVisitor(visitNth,reserve,verbose,multiline)
def setStartingPoint(self,arg):
self.arg_=arg
    self.value_=self.gm_.evaluate(self.arg_)
def infer(self,visitor=None):
if(self.arg_ is None):
self.arg_ = numpy.zeros(self.gm_.numberOfVariables,dtype=label_type)
      self.value_ = self.gm_.evaluate(self.arg_)
# start inference
if visitor is not None:
visitor.begin(self)
# start fusion moves
for x in range(self.steps):
randState=numpy.random.randint(low=0, high=self.nLabels, size=self.nVar).astype(label_type)
r = self.fusionMover.fuse(self.arg_,randState,self.fusionSolver)
self.arg_=r[0]
self.value_=r[1]
visitor.visit(self)
# end inference
if visitor is not None:
visitor.end(self)
def name(self):
return "RandomFusion"
def bound(self):
return -1.0*float('inf')
def arg(self):
return self.arg_
def value(self):
return self.value_
class __CheapInitialization__(object):
def __init__(self,gm,accumulator=None,parameter=InfParam()):
if accumulator is None:
self.accumulator=defaultAccumulator(gm=gm)
else:
self.accumulator=accumulator
kwargs=parameter.kwargs
self.gm_=gm
self.arg_ = None
self.value_ = None
self.initType = kwargs.get('initType', 'localOpt')
def timingVisitor(self,visitNth=1,reserve=0,verbose=True,multiline=True):
return GenericTimingVisitor(visitNth,reserve,verbose,multiline)
def setStartingPoint(self,arg):
self.arg_=arg
    self.value_=self.gm_.evaluate(self.arg_)
def infer(self,visitor=None):
if(self.arg_ is None):
self.arg_ = numpy.zeros(self.gm_.numberOfVariables,dtype=label_type)
      self.value_ = self.gm_.evaluate(self.arg_)
# start inference
if visitor is not None:
visitor.begin(self)
if(self.initType=='localOpt'):
print "move local opt"
self.arg_ = self.gm_.moveLocalOpt('minimizer')
print "done"
visitor.visit(self)
# end inference
if visitor is not None:
visitor.end(self)
def name(self):
return "CheapInitialization"
def bound(self):
return -1.0*float('inf')
def arg(self):
return self.arg_
def value(self):
return self.value_
inference.__dict__['CheapInitialization']=__CheapInitialization__
inference.__dict__['ChainedInf']=__ChainedInf__
inference.__dict__['RandomFusion']=__RandomFusion__
if __name__ == "__main__":
pass
|
yanlend/opengm
|
src/interfaces/python/opengm/__init__.py
|
Python
|
mit
| 12,141
|
[
"VisIt"
] |
3daa85a5e872d504d2f97b0db7c5099423f9937f48d686fca87ea1b4f9fde61c
|
#!/usr/bin/python
"""
Copyright 2016 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import MySQLdb
import dbInfo
import optparse
import smtplib
from mailer import Mailer
from mailer import Message
from smtplib import SMTPRecipientsRefused
import time
from datetime import timedelta, datetime
import mailInfo
sys.path.append("galaxyharvester.net")
sys.path.append("html")
import ghNames
import serverBest
import dbShared
def ghConn():
conn = MySQLdb.connect(host = dbInfo.DB_HOST,
db = dbInfo.DB_NAME,
user = dbInfo.DB_USER,
passwd = dbInfo.DB_PASS)
conn.autocommit(True)
return conn
# Creates alert records for specified alert types
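# Channel encoding (inferred from the branches below): odd alertTypes values
# create an in-site alert (type 1), values >= 4 also create a mobile alert
# (type 4), and any value other than 1, 4, or 5 additionally queues an email
# alert (type 2), which sendAlertMail then tries to deliver immediately.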
def addAlert(userID, alertTypes, msgText, link, alertTitle):
msgText = dbShared.dbInsertSafe(msgText)
alertTitle = dbShared.dbInsertSafe(alertTitle)
if len(msgText) + len(alertTitle) + 3 > 1023:
# Truncate the message so it will fit
msgText = msgText[:(1020 - len(alertTitle))]
msgText = msgText[:msgText[:-9].rfind("\n")]
msgText = msgText + "\n more..."
conn = ghConn()
cursor = conn.cursor()
if (alertTypes % 2 == 1):
cursor.execute("".join(("INSERT INTO tAlerts (userID, alertType, alertTime, alertMessage, alertLink, alertStatus) VALUES ('", userID, "', 1, NOW(), '", alertTitle, " - ", msgText, "', '", link, "', 0);")))
homeid = cursor.lastrowid
if (alertTypes >= 4):
cursor.execute("".join(("INSERT INTO tAlerts (userID, alertType, alertTime, alertMessage, alertLink, alertStatus) VALUES ('", userID, "', 4, NOW(), '", alertTitle, " - ", msgText, "', '", link, "', 0);")))
mobileid = cursor.lastrowid
if (alertTypes != 1 and alertTypes != 4 and alertTypes != 5):
cursor.execute("".join(("INSERT INTO tAlerts (userID, alertType, alertTime, alertMessage, alertLink, alertStatus) VALUES ('", userID, "', 2, NOW(), '", alertTitle, " - ", msgText, "', '", link, "', 0);")))
emailid = cursor.lastrowid
cursor.close()
sendAlertMail(conn, userID, msgText, link, emailid, alertTitle)
else:
cursor.close()
def sendAlertMail(conn, userID, msgText, link, alertID, alertTitle):
# Don't try to send mail if we exceeded quota within last hour
lastFailureTime = datetime(2000, 1, 1, 12)
currentTime = datetime.fromtimestamp(time.time())
timeSinceFailure = currentTime - lastFailureTime
try:
f = open("last_email_failure.txt")
lastFailureTime = datetime.strptime(f.read().strip(), "%Y-%m-%d %H:%M:%S")
f.close()
timeSinceFailure = currentTime - lastFailureTime
except IOError as e:
sys.stdout.write("No last failure time\n")
if timeSinceFailure.days < 1 and timeSinceFailure.seconds < 3660:
return 1
# look up the user email
cursor = conn.cursor()
cursor.execute("SELECT emailAddress FROM tUsers WHERE userID='" + userID + "';")
row = cursor.fetchone()
if row == None:
result = "bad username"
else:
email = row[0]
if (email.find("@") > -1):
# send message
message = Message(From="\"Galaxy Harvester Alerts\" <alert@galaxyharvester.net>",To=email)
message.Subject = "".join(("Galaxy Harvester ", alertTitle))
message.Body = "".join(("Hello ", userID, ",\n\n", msgText, "\n\n", link, "\n\n You can manage your alerts at http://galaxyharvester.net/myAlerts.py\n"))
message.Html = "".join(("<div><img src='http://galaxyharvester.net/images/ghLogoLarge.png'/></div><p>Hello ", userID, ",</p><br/><p>", msgText.replace("\n", "<br/>"), "</p><p><a style='text-decoration:none;' href='", link, "'><div style='width:170px;font-size:18px;font-weight:600;color:#feffa1;background-color:#003344;padding:8px;margin:4px;border:1px solid black;'>View in Galaxy Harvester</div></a><br/>or copy and paste link: ", link, "</p><br/><p>You can manage your alerts at <a href='http://galaxyharvester.net/myAlerts.py'>http://galaxyharvester.net/myAlerts.py</a></p><p>-Galaxy Harvester Administrator</p>"))
mailer = Mailer(mailInfo.MAIL_HOST)
mailer.login(mailInfo.ALERTMAIL_USER, mailInfo.MAIL_PASS)
try:
mailer.send(message)
result = 'email sent'
except SMTPRecipientsRefused as e:
result = 'email failed'
sys.stderr.write('Email failed - ' + str(e))
trackEmailFailure(datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S"))
# update alert status
if ( result == 'email sent' ):
cursor.execute('UPDATE tAlerts SET alertStatus=1, statusChanged=NOW() WHERE alertID=' + str(alertID) + ';')
else:
result = 'Invalid email.'
cursor.close()
def checkSpawnAlerts(conn, spawnName, alertValue, galaxy, enteredBy, stats, galaxyName):
# array of stat titles for making message
statNames = ["CR","CD","DR","FL","HR","MA","PE","OQ","SR","UT","ER"]
# open filters for the type
cursor = conn.cursor()
cursor.execute("SELECT userID, alertTypes, CRmin, CDmin, DRmin, FLmin, HRmin, MAmin, PEmin, OQmin, SRmin, UTmin, ERmin, fltType, fltValue FROM tFilters WHERE galaxy=" + str(galaxy) + " AND alertTypes > 0 AND ((fltType = 1 AND fltValue = '" + alertValue + "') OR (fltType = 2 AND '" + alertValue + "' IN (SELECT resourceType FROM tResourceTypeGroup WHERE resourceGroup=fltValue)))")
row = cursor.fetchone()
# check each filter for this resource type to see if min stats hit
while row != None:
sendAlert = True
statStr = ""
for x in range(11):
if (row[x+2]) > 0:
if (stats[x] < row[x+2]):
sendAlert = False
else:
statStr = statStr + statNames[x] + ": " + str(stats[x]) + ", "
if len(statStr) > 1:
statStr = statStr[:-2]
# add alert records if no stats were under min
if sendAlert:
# Look up the name for the alert value
typeGroup = row[14]
if row[13] == 1:
typeGroup = ghNames.getResourceTypeName(row[14])
else:
typeGroup = ghNames.getResourceGroupName(row[14])
addAlert(row[0], row[1], typeGroup + ' named ' + spawnName + ' added to ' + galaxyName + ' with stats ' + statStr, 'http://galaxyharvester.net/resource.py/' + str(galaxy) + '/' + spawnName, 'Resource Spawn Alert')
row = cursor.fetchone()
cursor.close()
def checkDespawnAlerts(conn, spawnID, spawnName, galaxyName, unavailable, galaxy):
cursor = conn.cursor()
cursor.execute('SELECT userID, despawnAlert FROM tFavorites WHERE itemID={0} AND despawnAlert > 0;'.format(spawnID))
row = cursor.fetchone()
while row != None:
addAlert(row[0], row[1], 'Resource named ' + spawnName + ' on ' + galaxyName + ' despawned at ' + str(unavailable), 'http://galaxyharvester.net/resource.py/' + str(galaxy) + '/' + spawnName, 'Resource Despawn Alert')
row = cursor.fetchone()
cursor.close()
def checkServerBest(conn, spawnID, spawnName, galaxy):
result = serverBest.checkSpawn(spawnID)
for x in range(len(result[1])):
schematicStr = ''
bestStr = ''
for k, v in result[1][x].iteritems():
quoteSchem = "".join(("'", k, "'"))
schematicStr = ','.join((schematicStr, quoteSchem))
bestStr = '\n'.join((bestStr, '\n'.join(v)))
		if len(schematicStr) > 0:
schematicStr = schematicStr[1:]
# open people with favorites for the professions involved
cursor = conn.cursor()
cursor.execute("SELECT tFavorites.userID, defaultAlertTypes, profName FROM tFavorites INNER JOIN tUsers ON tFavorites.userID = tUsers.userID INNER JOIN tProfession ON tFavorites.itemID = tProfession.profID WHERE galaxy={1} AND favType=3 AND itemID={0} GROUP BY tFavorites.userID, defaultAlertTypes, profName;".format(result[0][x], galaxy))
row = cursor.fetchone()
# Add alert for each user watching for profession server bests hit by this spawn
while row != None:
addAlert(row[0], row[1], bestStr, ''.join(('http://galaxyharvester.net/resource.py/', str(galaxy), '/', spawnName)), ''.join((row[2], ' Server Best Alert')))
row = cursor.fetchone()
cursor.close()
# open people with favorites for the schematics involved
cursor = conn.cursor()
cursor.execute("SELECT tFavorites.userID, defaultAlertTypes, schematicID, schematicName FROM tFavorites INNER JOIN tUsers ON tFavorites.userID = tUsers.userID INNER JOIN tSchematic ON tFavorites.favGroup = tSchematic.schematicID WHERE galaxy={1} AND favType=4 AND favGroup IN ({0}) GROUP BY tFavorites.userID, defaultAlertTypes, schematicID, schematicName;".format(schematicStr, galaxy))
row = cursor.fetchone()
# Add alert for each user watching for schematic server bests hit by this spawn
while row != None:
addAlert(row[0], row[1], '\n'.join(result[1][x][row[2]]), ''.join(('http://galaxyharvester.net/resource.py/', str(galaxy), '/', spawnName)), ''.join((row[3], ' Server Best Alert')))
row = cursor.fetchone()
cursor.close()
def checkDespawnReputation(conn, spawnID, spawnName, entered, galaxy):
# open events for this despawned resource
users = {}
lastEventTime = None
alreadyRemovedFlag = False
editedFlag = False
cursor = conn.cursor()
cursor.execute("SELECT galaxy, userID, eventTime, eventType, planetID, eventDetail FROM tResourceEvents WHERE spawnID={0} ORDER BY eventTime DESC;".format(spawnID))
row = cursor.fetchone()
if row != None:
lastEventTime = row[2]
# Summarize reputation bonus for each user involved
while row != None:
if row[1] not in users:
users[row[1]] = 0
if row[3] == 'a':
if editedFlag == False:
users[row[1]] = users[row[1]] + 3
else:
users[row[1]] = users[row[1]] + 1
if row[3] == 'p':
users[row[1]] = users[row[1]] + 1
if row[3] == 'v':
users[row[1]] = users[row[1]] + 2
if row[3] == 'r':
users[row[1]] = users[row[1]] + 1
if row[3] == 'r' and row[4] == 0:
users[row[1]] = users[row[1]] + 2
if row[3] == 'e':
users[row[1]] = users[row[1]] + 2
editedFlag = True
if row[3] == 'w':
users[row[1]] = users[row[1]] + 2
if row[3] == 'n':
users[row[1]] = users[row[1]] + 2
if row[3] == 'g':
users[row[1]] = users[row[1]] + 2
if row[5] == 'previously unavailable':
alreadyRemovedFlag = True
row = cursor.fetchone()
cursor.close()
if lastEventTime != None and alreadyRemovedFlag == False:
timeSinceEntered = lastEventTime - entered
tmpDays = timeSinceEntered.days
		# If the resource has not been available for at least a few days, it's being removed prematurely and is not valid for rep awards
if tmpDays > 3:
link = "/resource.py/" + str(galaxy) + "/" + spawnName
message = "You gained reputation for your contribution to tracking resource " + spawnName + "!"
for k, v in users.iteritems():
# Award rep for users contributing at least "4 points" and exclude automated users
if v >= 4 and k != "etas" and k != "default" and k != "c0pp3r":
dbShared.logEvent("INSERT INTO tUserEvents (userID, targetType, targetID, eventType, eventTime) VALUES ('" + k + "', 'r', " + str(spawnID) + ", '+', NOW());", "+", k, galaxy, spawnID)
cursor = conn.cursor()
cursor.execute("INSERT INTO tAlerts (userID, alertType, alertTime, alertMessage, alertLink, alertStatus) VALUES ('" + k + "', 1, NOW(), '" + message + "', '" + link + "', 0);")
cursor.close()
def main():
conn = ghConn()
# First try sending any backed up alert mails
retryPendingMail(conn)
f = None
lastAddedCheckTime = ""
lastRemovedCheckTime = ""
try:
f = open("last_alerts_check_added.txt")
lastAddedCheckTime = f.read().strip()
f.close()
except IOError as e:
sys.stdout.write("No last added check time\n")
try:
f = open("last_alerts_check_removed.txt")
lastRemovedCheckTime = f.read().strip()
f.close()
except IOError as e:
sys.stdout.write("No last removed check time\n")
# Check for despawn alerts
checkRemovedStart = datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S")
if lastRemovedCheckTime == "":
sys.stderr.write("Skipping removed check.\n")
else:
# look up the despawn info
cursor = conn.cursor()
cursor.execute("SELECT spawnName, galaxy, enteredBy, resourceType, CR, CD, DR, FL, HR, MA, PE, OQ, SR, UT, ER, galaxyName, unavailable, spawnID, entered FROM tResources INNER JOIN tGalaxy ON tResources.galaxy = tGalaxy.galaxyID WHERE unavailable >= '" + lastRemovedCheckTime + "';")
row = cursor.fetchone()
while row != None:
spawnName = row[0]
galaxyName = row[15]
unavailable = row[16]
checkDespawnAlerts(conn, row[17], spawnName, galaxyName, unavailable, row[1])
checkDespawnReputation(conn, row[17], row[0], row[18], row[1])
row = cursor.fetchone()
cursor.close()
# Update tracking file
try:
f = open("last_alerts_check_removed.txt", "w")
f.write(checkRemovedStart)
f.close()
except IOError as e:
sys.stderr.write("Could not write removed tracking file")
# Check for spawn and server best alerts
checkAddedStart = datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S")
if lastAddedCheckTime == "":
sys.stderr.write("Skipping added check.\n")
else:
# look up the spawn info
cursor = conn.cursor()
cursor.execute("SELECT spawnName, galaxy, enteredBy, resourceType, CR, CD, DR, FL, HR, MA, PE, OQ, SR, UT, ER, galaxyName, unavailable, spawnID FROM tResources INNER JOIN tGalaxy ON tResources.galaxy = tGalaxy.galaxyID WHERE entered >= '" + lastAddedCheckTime + "';")
row = cursor.fetchone()
while row != None:
alertValue = row[3]
galaxy = row[1]
spawnName = row[0]
enteredBy = row[2]
stats = [row[4],row[5],row[6],row[7],row[8],row[9],row[10],row[11],row[12],row[13],row[14]]
galaxyName = row[15]
checkSpawnAlerts(conn, spawnName, alertValue, galaxy, enteredBy, stats, galaxyName)
checkServerBest(conn, row[17], spawnName, galaxy)
row = cursor.fetchone()
cursor.close()
conn.close()
# Update tracking file
try:
f = open("last_alerts_check_added.txt", "w")
f.write(checkAddedStart)
f.close()
except IOError as e:
sys.stderr.write("Could not write added tracking file")
def trackEmailFailure(failureTime):
# Update tracking file
try:
f = open("last_email_failure.txt", "w")
f.write(failureTime)
f.close()
except IOError as e:
sys.stderr.write("Could not write email failure tracking file")
def retryPendingMail(conn):
	# open email alerts that have not been successfully sent and are less than 48 hours old
minTime = datetime.fromtimestamp(time.time()) - timedelta(days=2)
cursor = conn.cursor()
cursor.execute("SELECT userID, alertTime, alertMessage, alertLink, alertID FROM tAlerts WHERE alertType=2 AND alertStatus=0 and alertTime > '" + minTime.strftime("%Y-%m-%d %H:%M:%S") + "';")
row = cursor.fetchone()
# try to send as long as not exceeding quota
while row != None:
fullText = row[2]
splitPos = fullText.find(" - ")
alertTitle = fullText[:splitPos]
alertBody = fullText[splitPos+3:]
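		# Note (added): alertMessage is assumed to be stored as "<title> - <body>",
		# so the first " - " splits it back into a mail subject and body.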
result = sendAlertMail(conn, row[0], alertBody, row[3], row[4], alertTitle)
if result == 1:
sys.stderr.write("Delayed retrying rest of mail since quota reached.\n")
break
row = cursor.fetchone()
cursor.close()
if __name__ == "__main__":
main()
|
druss316/G-Harvestor
|
checkAlerts.py
|
Python
|
gpl-3.0
| 15,402
|
[
"Galaxy"
] |
43129fb2c58f896a75523818aa41de43035dd50ebd6fb087e7e3899794c41aa7
|
from sympy.physics.matrices import msigma, mgamma, minkowski_tensor, pat_matrix
from sympy import zeros, eye, I, Matrix
def test_parallel_axis_theorem():
# This tests the parallel axis theorem matrix by comparing to test
# matrices.
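    # For reference (added note): for mass m and displacement d = (dx, dy, dz),
    # the parallel axis correction being tested is
    #     I_ij = m * (|d|**2 * delta_ij - d_i * d_j),
    # e.g. m = 1 and d = (1, 1, 1) give the 2 / -1 pattern of mat1 below.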
# First case, 1 in all directions.
mat1 = Matrix(((2, -1, -1), (-1, 2, -1), (-1, -1, 2)))
assert pat_matrix(1, 1, 1, 1) == mat1
assert pat_matrix(2, 1, 1, 1) == 2*mat1
# Second case, 1 in x, 0 in all others
mat2 = Matrix(((0, 0, 0), (0, 1, 0), (0, 0, 1)))
assert pat_matrix(1, 1, 0, 0) == mat2
assert pat_matrix(2, 1, 0, 0) == 2*mat2
# Third case, 1 in y, 0 in all others
mat3 = Matrix(((1, 0, 0), (0, 0, 0), (0, 0, 1)))
assert pat_matrix(1, 0, 1, 0) == mat3
assert pat_matrix(2, 0, 1, 0) == 2*mat3
# Fourth case, 1 in z, 0 in all others
mat4 = Matrix(((1, 0, 0), (0, 1, 0), (0, 0, 0)))
assert pat_matrix(1, 0, 0, 1) == mat4
assert pat_matrix(2, 0, 0, 1) == 2*mat4
def test_Pauli():
#this and the following test are testing both Pauli and Dirac matrices
#and also that the general Matrix class works correctly in a real world
#situation
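    # For reference (added note): msigma(i) should return the standard Pauli
    # matrices
    #     sigma1 = [[0, 1], [1, 0]],  sigma2 = [[0, -I], [I, 0]],  sigma3 = [[1, 0], [0, -1]],
    # so the products below follow from sigma_i*sigma_j = delta_ij*eye(2) + I*eps_ijk*sigma_k.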
sigma1 = msigma(1)
sigma2 = msigma(2)
sigma3 = msigma(3)
assert sigma1 == sigma1
assert sigma1 != sigma2
# sigma*I -> I*sigma (see #354)
assert sigma1*sigma2 == sigma3*I
assert sigma3*sigma1 == sigma2*I
assert sigma2*sigma3 == sigma1*I
assert sigma1*sigma1 == eye(2)
assert sigma2*sigma2 == eye(2)
assert sigma3*sigma3 == eye(2)
assert sigma1*2*sigma1 == 2*eye(2)
assert sigma1*sigma3*sigma1 == -sigma3
def test_Dirac():
gamma0 = mgamma(0)
gamma1 = mgamma(1)
gamma2 = mgamma(2)
gamma3 = mgamma(3)
gamma5 = mgamma(5)
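    # For reference (added note): mgamma(mu) returns the Dirac matrices in the
    # standard representation with metric minkowski_tensor = diag(1, -1, -1, -1),
    # and gamma5 = I*gamma0*gamma1*gamma2*gamma3, which is what the first
    # assertion below checks explicitly.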
# gamma*I -> I*gamma (see #354)
assert gamma5 == gamma0 * gamma1 * gamma2 * gamma3 * I
assert gamma1 * gamma2 + gamma2 * gamma1 == zeros(4)
assert gamma0 * gamma0 == eye(4) * minkowski_tensor[0, 0]
assert gamma2 * gamma2 != eye(4) * minkowski_tensor[0, 0]
assert gamma2 * gamma2 == eye(4) * minkowski_tensor[2, 2]
assert mgamma(5, True) == \
mgamma(0, True)*mgamma(1, True)*mgamma(2, True)*mgamma(3, True)*I
|
lidavidm/mathics-heroku
|
venv/lib/python2.7/site-packages/sympy/physics/tests/test_physics_matrices.py
|
Python
|
gpl-3.0
| 2,212
|
[
"DIRAC"
] |
871238326da5471a6ba84191f092fb1ea36df43db7a70acb5154fd0b7284e389
|
# -*- coding: utf-8 -*-
"""
Lower calls.
"""
from pykit import types
from pykit.ir import visit, Const, FunctionPass
class Verify(object):
"""Verify the current state of calls"""
def op_call(self, op):
assert not op.args[0].type.is_object, "Object calls must have been resolved"
class ExceptionChecking(FunctionPass):
"""
Insert exception checking for calls.
Call metadata:
exc.badval: check against bad value to propagate errors
exc.raise: raise error if bad value encountered
"""
def op_call(self, op):
self.builder.position_after(op)
exc_badval = op.metadata.get("exc.badval")
exc = op.metadata.get("exc.raise")
if exc:
self._handle_raise(op, exc_badval, exc)
elif exc_badval is not None:
self.builder.check_error(op, exc_badval)
def _handle_raise(self, op, badval, exc):
"Raise an exception if retval == badval"
assert badval is not None
cond = self.builder.eq(types.Bool, [op, badval])
with self.builder.if_(cond):
msg = op.metadata.get("exc.msg")
args = [Const(msg)] if msg is not None else []
exc = self.builder.new_exc(types.Exception, [exc] + args)
self.builder.exc_throw(exc)
# call virtual
# call math
def run(func, env):
"""Generate runtime calls into thread library"""
if env.get("verify"):
visit(Verify(), func)
visit(ExceptionChecking(func), func)
|
ContinuumIO/pykit
|
pykit/lower/lower_calls.py
|
Python
|
bsd-3-clause
| 1,497
|
[
"VisIt"
] |
e9ffe9722188a3791e16fd418a1bda0631c44d4a2cbde293b1c9cf26a046cd36
|
from WebAppDIRAC.Lib.WebHandler import WebSocketHandler, asyncGen
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC import gConfig, gLogger
from DIRAC.Core.Utilities import Time, List
from DIRAC.Core.Utilities.CFG import CFG
from DIRAC.ConfigurationSystem.private.Modificator import Modificator
import json
import types
class ConfigurationManagerHandler( WebSocketHandler ):
AUTH_PROPS = "authenticated"
def on_open( self ):
self.__configData = {}
@asyncGen
def on_message( self, msg ):
self.log.info( "RECEIVED %s" % msg )
try:
params = json.loads( msg )
    except:
      gLogger.exception( "Could not parse the received message as JSON" )
      return
res = False
if params["op"] == "init":
res = yield self.threadTask( self.__getRemoteConfiguration, "init" )
elif params["op"] == "getSubnodes":
res = self.__getSubnodes( params["node"], params["nodePath"] )
elif params["op"] == "showConfigurationAsText":
res = self.__showConfigurationAsText()
elif params["op"] == "resetConfiguration":
res = yield self.threadTask( self.__getRemoteConfiguration, "resetConfiguration" )
elif params["op"] == "getBulkExpandedNodeData":
res = self.__getBulkExpandedNodeData( params["nodes"] )
elif params["op"] == "setOptionValue":
res = self.__setOptionValue( params )
elif params["op"] == "setComment":
res = self.__setComment( params )
elif params["op"] == "copyKey":
res = self.__copyKey( params )
elif params["op"] == "renameKey":
res = self.__renameKey( params )
elif params["op"] == "deleteKey":
res = self.__deleteKey( params )
elif params["op"] == "createSection":
res = self.__createSection( params )
elif params["op"] == "createOption":
res = self.__createOption( params )
elif params["op"] == "moveNode":
res = self.__moveNode( params )
elif params["op"] == "commitConfiguration":
res = yield self.threadTask( self.__commitConfiguration )
elif params["op"] == "showCurrentDiff":
res = self.__showCurrentDiff()
elif params["op"] == "showshowHistory":
res = self.__history()
elif params["op"] == "showDiff":
res = self.__showDiff( params )
elif params["op"] == "rollback":
res = self.__rollback( params )
elif params["op"] == "download":
res = self.__download( )
gLogger.info( "Sending back message %s" % res )
if res:
self.write_message( res )
def __getRemoteConfiguration( self, funcName ):
rpcClient = RPCClient( gConfig.getValue( "/DIRAC/Configuration/MasterServer", "Configuration/Server" ) )
modCfg = Modificator( rpcClient )
retVal = modCfg.loadFromRemote()
if not retVal[ 'OK' ]:
return {"success":0, "op":"getSubnodes", "message":"The configuration cannot be read from the remote !"}
self.__configData[ 'cfgData' ] = modCfg
self.__configData[ 'strCfgData' ] = str( modCfg )
version = str( modCfg.getCFG()["DIRAC"]["Configuration"]["Version"] )
configName = str( modCfg.getCFG()["DIRAC"]["Configuration"]["Name"] )
return {"success":1, "op":funcName, "version":version, "name":configName}
def __getSubnodes( self, parentNodeId, sectionPath ):
gLogger.info( "Expanding section", "%s" % sectionPath )
retData = []
retVal = self.__getSubnodesForPath( sectionPath, retData )
if not retVal:
gLogger.exception( "Section does not exist", "%s -> %s" % ( sectionPath, str( v ) ) )
return {"success":0, "op":"getSubnodes", "message":"Section %s does not exist: %s" % ( sectionPath, str( v ) )}
return {"success":1, "op":"getSubnodes", "nodes":retData, "parentNodeId":parentNodeId}
def __getSubnodesForPath( self, sectionPath, retData ):
try:
sectionCfg = self.__configData[ 'cfgData' ].getCFG()
for section in [ section for section in sectionPath.split( "/" ) if not section.strip() == "" ]:
sectionCfg = sectionCfg[ section ]
except Exception, v:
return False
for entryName in sectionCfg.listAll():
comment = sectionCfg.getComment( entryName )
nodeDef = { 'text' : entryName, 'csName' : entryName, 'csComment' : comment }
nodeDef[ 'leaf' ] = False
nodeDef[ 'expanded' ] = False
if not sectionCfg.isSection( entryName ):
nodeDef[ 'leaf' ] = True
nodeDef[ 'csValue' ] = sectionCfg[ entryName ]
nodeDef[ 'text' ] = nodeDef[ 'text' ] + " = " + nodeDef[ 'csValue' ]
# Comment magic
htmlC = self.__htmlComment( comment )
if htmlC:
qtipDict = { 'text' : htmlC }
nodeDef[ 'qtipCfg' ] = qtipDict
retData.append( nodeDef )
return True
def __htmlComment( self, rawComment ):
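    # Note (added): the raw CS comment is split into lines; a trailing line that
    # starts with "@@-" is treated as a committer tag and rendered in bold after
    # the remaining comment lines, which are joined with <br/>.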
commentLines = []
commiter = ""
rawLines = rawComment.strip().split( "\n" )
if rawLines[-1].find( "@@-" ) == 0:
commiter = rawLines[-1][3:]
rawLines.pop( -1 )
for line in rawLines:
line = line.strip()
if not line:
continue
commentLines.append( line )
if commentLines or commiter:
return "%s<small><strong>%s</strong></small>" % ( "<br/>".join( commentLines ), commiter )
else:
return False
def __showConfigurationAsText( self ):
# time.sleep(10)
return {"success":1, "op":"showConfigurationAsText", "text":self.__configData[ 'strCfgData' ]}
def __getBulkExpandedNodeData( self, nodes ):
nodesPaths = nodes.split( "<<||>>" )
returnData = []
for nodePath in nodesPaths:
pathData = []
if self.__getSubnodesForPath( nodePath, pathData ):
returnData.append( [nodePath, pathData] )
return {"success":1, "op":"getBulkExpandedNodeData", "data":returnData}
def __setOptionValue( self, params ):
try:
optionPath = str( params[ 'path' ] )
optionValue = str( params[ 'value' ] )
except Exception, e:
return {"success":0, "op":"setOptionValue", "message":"Can't decode path or value: %s" % str( e )}
self.__setCommiter()
self.__configData[ 'cfgData' ].setOptionValue( optionPath, optionValue )
if self.__configData[ 'cfgData' ].getValue( optionPath ) == optionValue:
gLogger.info( "Set option value", "%s = %s" % ( optionPath, optionValue ) )
return {"success":1, "op":"setOptionValue", "parentNodeId":params["parentNodeId"], "value":optionValue}
return {"success":0, "op":"setOptionValue", "message":"Can't update %s" % optionPath}
def __setComment( self, params ):
try:
path = str( params[ 'path' ] )
value = str( params[ 'value' ] )
except Exception, e:
return {"success":0, "op":"setComment", "message":"Can't decode path or value: %s" % str( e )}
self.__setCommiter()
self.__configData[ 'cfgData' ].setComment( path, value )
gLogger.info( "Set comment", "%s = %s" % ( path, value ) )
return {"success":1, "op":"setComment", "parentNodeId":params["parentNodeId"], "comment":self.__configData[ 'cfgData' ].getComment( path )}
def __copyKeyOld( self, params ):
try:
originalPath = str( params[ 'copyFromPath' ] ).strip()
toCopyPath = str( params[ 'copyToPath' ] ).strip()
newName = str( params[ 'newName' ] ).strip()
except Exception, e:
return {"success":0, "op":"copyKey", "message":"Can't decode parameter: %s" % str( e )}
try:
if len( originalPath ) == 0:
return {"success":0, "op":"copyKey", "message":"Parent path is not valid"}
if len( newName ) == 0:
return {"success":0, "op":"copyKey", "message":"Put any name for the new key!"}
self.__setCommiter()
if self.__configData[ 'cfgData' ].copyKey( originalPath, newName ):
pathList = List.fromChar( originalPath, "/" )
        newPath = "/%s/%s" % ( "/".join( pathList[:-1] ), newName )
if self.__configData[ 'cfgData' ].existsSection( toCopyPath ):
return {"success":1, "op":"copyKey", "parentNodeToId":params["parentNodeToId"], "parentNodeFromId":params["parentNodeFromId"], "newName":newName, "comment":self.__configData[ 'cfgData' ].getComment( newPath )}
else:
return {"success":1, "op":"copyKey", "parentNodeToId":params["parentNodeToId"], "parentNodeFromId":params["parentNodeFromId"], "value":self.__configData[ 'cfgData' ].getValue( newPath ), "newName":newName, "comment":self.__configData[ 'cfgData' ].getComment( newPath )}
else:
return {"success":0, "op":"copyKey", "message":"Path can't be created. Exists already?"}
except Exception, e:
      return {"success":0, "op":"copyKey", "message":"Can't create path: %s" % str( e )}
def __renameKey( self, params ):
try:
keyPath = str( params[ 'path' ] ).strip()
newName = str( params[ 'newName' ] ).strip()
except Exception, e:
return {"success":0, "op":"renameKey", "message":"Can't decode parameter: %s" % str( e )}
try:
if len( keyPath ) == 0:
return {"success":0, "op":"renameKey", "message":"Entity path is not valid"}
if len( newName ) == 0:
return {"success":0, "op":"renameKey", "message":"Put any name for the entity!"}
if self.__configData[ 'cfgData' ].existsOption( keyPath ) or self.__configData[ 'cfgData' ].existsSection( keyPath ) :
self.__setCommiter()
if self.__configData[ 'cfgData' ].renameKey( keyPath, newName ):
return {"success":1, "op":"renameKey", "parentNodeId":params["parentNodeId"], "newName":newName}
else:
return {"success":0, "op":"renameKey", "message":"There was a problem while renaming"}
else:
return {"success":0, "op":"renameKey", "message":"Path doesn't exist"}
except Exception, e:
return {"success":0, "op":"renameKey", "message":"Can't rename entity: %s" % str( e )}
def __deleteKey( self, params ):
try:
keyPath = str( params[ 'path' ] ).strip()
except Exception, e:
return {"success":0, "op":"deleteKey", "message":"Can't decode parameter: %s" % str( e )}
try:
if len( keyPath ) == 0:
return {"success":0, "op":"deleteKey", "message":"Entity path is not valid"}
if self.__configData[ 'cfgData' ].removeOption( keyPath ) or self.__configData[ 'cfgData' ].removeSection( keyPath ):
return {"success":1, "op":"deleteKey", "parentNodeId":params["parentNodeId"]}
else:
return {"success":0, "op":"deleteKey", "message":"Entity doesn't exist"}
except Exception, e:
return {"success":0, "op":"deleteKey", "message":"Can't rename entity: %s" % str( e )}
def __createSection( self, params ):
try:
parentPath = str( params[ 'path' ] ).strip()
sectionName = str( params[ 'name' ] ).strip()
configText = str( params[ 'config' ] ).strip()
except Exception, e:
return {"success":0, "op":"createSection", "message":"Can't decode parameter: %s" % str( e )}
try:
if len( parentPath ) == 0:
return {"success":0, "op":"createSection", "message":"Parent path is not valid"}
if len( sectionName ) == 0:
return {"success":0, "op":"createSection", "message":"Put any name for the section!"}
sectionPath = "%s/%s" % ( parentPath, sectionName )
gLogger.info( "Creating section", "%s" % sectionPath )
self.__setCommiter()
if self.__configData[ 'cfgData' ].createSection( sectionPath ):
nD = { 'text' : sectionName, 'csName' : sectionName, 'csComment' : self.__configData[ 'cfgData' ].getComment( sectionPath ) }
htmlC = self.__htmlComment( nD[ 'csComment' ] )
if htmlC:
qtipDict = { 'text' : htmlC }
nD[ 'qtipCfg' ] = qtipDict
# If config Text is provided then a section is created out of that text
if configText != "":
cfgData = self.__configData[ 'cfgData' ].getCFG()
newCFG = CFG()
newCFG.loadFromBuffer( configText )
self.__setCommiter()
self.__configData[ 'cfgData' ].mergeSectionFromCFG( sectionPath, newCFG )
return {"success":1, "op":"createSection", "parentNodeId":params["parentNodeId"], "node":nD, "sectionFromConfig": 1}
else:
return {"success":1, "op":"createSection", "parentNodeId":params["parentNodeId"], "node":nD, "sectionFromConfig": 0}
else:
return {"success":0, "op":"createSection", "message":"Section can't be created. It already exists?"}
except Exception, e:
return {"success":0, "op":"createSection", "message":"Can't create section: %s" % str( e )}
def __createOption( self, params ):
try:
parentPath = str( params[ 'path' ] ).strip()
optionName = str( params[ 'name' ] ).strip()
optionValue = str( params[ 'value' ] ).strip()
except Exception, e:
return {"success":0, "op":"createOption", "message":"Can't decode parameter: %s" % str( e )}
try:
if len( parentPath ) == 0:
return {"success":0, "op":"createOption", "message":"Parent path is not valid"}
if len( optionName ) == 0:
return {"success":0, "op":"createOption", "message":"Put any name for the option!"}
if "/" in optionName:
return {"success":0, "op":"createOption", "message":"Options can't have a / in the name"}
if len( optionValue ) == 0:
return {"success":0, "op":"createOption", "message":"Options should have values!"}
optionPath = "%s/%s" % ( parentPath, optionName )
gLogger.info( "Creating option", "%s = %s" % ( optionPath, optionValue ) )
if not self.__configData[ 'cfgData' ].existsOption( optionPath ):
self.__setCommiter()
self.__configData[ 'cfgData' ].setOptionValue( optionPath, optionValue )
return {"success":1, "op":"createOption", "parentNodeId":params["parentNodeId"], "optionName":optionName, "value":self.__configData[ 'cfgData' ].getValue( optionPath ), "comment":self.__configData[ 'cfgData' ].getComment( optionPath )}
else:
return {"success":0, "op":"createOption", "message":"Option can't be created. It already exists?"}
except Exception, e:
return {"success":0, "op":"createOption", "message":"Can't create option: %s" % str( e )}
def __moveNode( self, params ):
try:
nodePath = params[ 'nodePath' ]
destinationParentPath = params[ 'newParentPath' ]
beforeOfIndex = int( params[ 'beforeOfIndex' ] )
except Exception, e:
return {"success":0, "op":"moveNode", "message":"Can't decode parameter: %s" % str( e ), "nodeId":params["nodeId"], "parentOldId":params["parentOldId"], "parentNewId":params["parentNewId"], "oldIndex":params["oldIndex"]}
gLogger.info( "Moving %s under %s before pos %s" % ( nodePath, destinationParentPath, beforeOfIndex ) )
cfgData = self.__configData[ 'cfgData' ].getCFG()
nodeDict = cfgData.getRecursive( nodePath )
if not nodeDict:
return {"success":0, "op":"moveNode", "message":"Moving entity does not exist", "nodeId":params["nodeId"], "parentOldId":params["parentOldId"], "parentNewId":params["parentNewId"], "oldIndex":params["oldIndex"]}
oldParentDict = cfgData.getRecursive( nodePath, -1 )
newParentDict = cfgData.getRecursive( destinationParentPath )
if type( newParentDict ) == types.StringType:
return {"success":0, "op":"moveNode", "message":"Destination is not a section", "nodeId":params["nodeId"], "parentOldId":params["parentOldId"], "parentNewId":params["parentNewId"], "oldIndex":params["oldIndex"]}
if not newParentDict:
return {"success":0, "op":"moveNode", "message":"Destination does not exist", "nodeId":params["nodeId"], "parentOldId":params["parentOldId"], "parentNewId":params["parentNewId"], "oldIndex":params["oldIndex"]}
# Calculate the old parent path
oldParentPath = "/%s" % "/".join( List.fromChar( nodePath, "/" )[:-1] )
if not oldParentPath == destinationParentPath and newParentDict['value'].existsKey( nodeDict['key'] ):
return {"success":0, "op":"moveNode", "message":"Another entry with the same name already exists", "nodeId":params["nodeId"], "parentOldId":params["parentOldId"], "parentNewId":params["parentNewId"], "oldIndex":params["oldIndex"]}
try:
brothers = newParentDict[ 'value' ].listAll()
if beforeOfIndex < len( brothers ):
nodeDict[ 'beforeKey' ] = brothers[ beforeOfIndex ]
oldParentDict[ 'value' ].deleteKey( nodeDict[ 'key' ] )
addArgs = {}
for key in ( 'comment', 'beforeKey', 'value', 'key' ):
if key in nodeDict:
addArgs[ key ] = nodeDict[ key ]
newParentDict[ 'value' ].addKey( **addArgs )
except Exception, e:
return {"success":0, "op":"moveNode", "message":"Can't move node: %s" % str( e ), "nodeId":params["nodeId"], "parentOldId":params["parentOldId"], "parentNewId":params["parentNewId"], "oldIndex":params["oldIndex"]}
return {"success":1, "op":"moveNode", "nodeId":params["nodeId"], "parentOldId":params["parentOldId"], "parentNewId":params["parentNewId"], "beforeOfIndex":params["beforeOfIndex"]}
def __copyKey( self, params ):
try:
nodePath = params[ 'copyFromPath' ]
destinationParentPath = params[ 'copyToPath' ]
newNodeName = params[ 'newName' ]
except Exception, e:
return {"success":0, "op":"copyKey", "message":"Can't decode parameter: %s" % str( e )}
# gLogger.info( "Moving %s under %s before pos %s" % ( nodePath, destinationParentPath, beforeOfIndex ) )
cfgData = self.__configData[ 'cfgData' ].getCFG()
self.__setCommiter()
nodeDict = cfgData.getRecursive( nodePath )
if not nodeDict:
return {"success":0, "op":"copyKey", "message":"Moving entity does not exist"}
oldParentDict = cfgData.getRecursive( nodePath, -1 )
newParentDict = cfgData.getRecursive( destinationParentPath )
if type( newParentDict ) == types.StringType:
return {"success":0, "op":"copyKey", "message":"Destination is not a section"}
if not newParentDict:
return {"success":0, "op":"copyKey", "message":"Destination does not exist"}
# Calculate the old parent path
oldParentPath = "/%s" % "/".join( List.fromChar( nodePath, "/" )[:-1] )
if not oldParentPath == destinationParentPath and newParentDict['value'].existsKey( newNodeName ):
return {"success":0, "op":"copyKey", "message":"Another entry with the same name already exists"}
try:
brothers = newParentDict[ 'value' ].listAll()
nodeDict["key"] = newNodeName
addArgs = {}
for key in ( 'comment', 'beforeKey', 'key' ):
if key in nodeDict:
addArgs[ key ] = nodeDict[ key ]
if isinstance(nodeDict["value"], str):
newParentDict[ 'value' ].addKey(nodeDict.get("key",""),nodeDict.get("value",""),nodeDict.get("comment",""), nodeDict.get("beforeKey",""))
else:
addArgs["value"] = nodeDict["value"].clone()
newParentDict[ 'value' ].addKey( **addArgs )
except Exception, e:
return {"success":0, "op":"copyKey", "message":"Can't move node: %s" % str( e )}
return {"success":1, "op":"copyKey", "newName":nodeDict['key'], "nodeId":params["nodeId"], "parentNodeToId":params["parentNodeToId"]}
def __commitConfiguration( self ):
data = self.getSessionData()
isAuth = False
if "properties" in data["user"]:
if "CSAdministrator" in data["user"]["properties"]:
isAuth = True
if not isAuth:
return {"success":0, "op":"commitConfiguration", "message":"You are not authorized to commit configurations!! Bad boy!"}
gLogger.always( "User %s is commiting a new configuration version" % data["user"]["DN"] )
retDict = self.__configData[ 'cfgData' ].commit()
if not retDict[ 'OK' ]:
return {"success":0, "op":"commitConfiguration", "message":retDict[ 'Message' ]}
return {"success":1, "op":"commitConfiguration"}
def __authorizeAction( self ):
data = self.getSessionData()
isAuth = False
if "properties" in data["user"]:
if "CSAdministrator" in data["user"]["properties"]:
isAuth = True
return isAuth
def __generateHTMLDiff( self, diffGen ):
diffList = []
linesDiffList = []
oldChange = False
lineNumber = 0
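    # Note (added): diffGen is expected to yield difflib-style lines prefixed
    # with "- " (removed), "+ " (added), "? " (hint for the preceding line) or
    # "  " (unchanged); the loop below folds them into
    # (action, oldText, newText, lineNumber) tuples for the diff template.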
for diffLine in diffGen:
if diffLine[0] == "-":
diffList.append( ( "del", diffLine[1:], "", lineNumber ) )
linesDiffList.append( ["del", lineNumber] )
lineNumber = lineNumber + 1
elif diffLine[0] == "+":
if oldChange:
diffList[-1] = ( "mod", diffList[-1][1], diffLine[1:], lineNumber )
linesDiffList[-1] = ["mod", lineNumber]
oldChange = False
else:
diffList.append( ( "add", "", diffLine[1:], lineNumber ) )
linesDiffList.append( ["add", lineNumber] )
lineNumber = lineNumber + 1
elif diffLine[0] == "?":
if diffList[-1][0] == 'del':
oldChange = True
elif diffList[-1][0] == "mod":
diffList[-1] = ( "conflict", diffList[-1][1], diffList[-1][2], lineNumber )
linesDiffList[-1] = ["conflict", lineNumber]
elif diffList[-1][0] == "add":
diffList[-2] = ( "mod", diffList[-2][1], diffList[-1][2], lineNumber )
linesDiffList[-2] = ["mod", lineNumber]
del( diffList[-1] )
lineNumber = lineNumber - 1
else:
diffList.append( ( "", diffLine[1:], diffLine[1:], lineNumber ) )
lineNumber = lineNumber + 1
return {"diff":diffList, "lines": linesDiffList, "totalLines": lineNumber}
def __showCurrentDiff( self ):
if not self.__authorizeAction():
return {"success":0, "op":"showCurrentDiff", "message":"You are not authorized to commit configurations!! Bad boy!"}
diffGen = self.__configData[ 'cfgData' ].showCurrentDiff()
processedData = self.__generateHTMLDiff( diffGen )
return self.write_message( json.dumps( {"success":1, "op":"showCurrentDiff", "lines":processedData["lines"], "totalLines": processedData["totalLines"], "html":self.render_string( "ConfigurationManager/diffConfig.tpl",
titles = ( "Server's version", "User's current version" ),
diffList = processedData["diff"] )} ) )
def __showDiff( self, params ):
if not self.__authorizeAction():
      raise WErr( 500, "You are not authorized to get diffs!! Bad boy!" )
try:
fromDate = str( params[ 'fromVersion' ] )
toDate = str( params[ 'toVersion' ] )
except Exception, e:
raise WErr( 500, "Can't decode params: %s" % e )
rpcClient = RPCClient( gConfig.getValue( "/DIRAC/Configuration/MasterServer", "Configuration/Server" ) )
modCfg = Modificator( rpcClient )
diffGen = modCfg.getVersionDiff( fromDate, toDate )
processedData = self.__generateHTMLDiff( diffGen )
return self.write_message( json.dumps( {"success":1, "op":"showDiff", "lines":processedData["lines"], "totalLines": processedData["totalLines"], "html":self.render_string( "ConfigurationManager/diffConfig.tpl",
titles = ( "From version %s" % fromDate, "To version %s" % toDate ),
diffList = processedData["diff"] )} ) )
def __rollback( self, params ):
rollbackVersion = ""
if not self.__authorizeAction():
      raise WErr( 500, "You are not authorized to get diffs!! Bad boy!" )
try:
rollbackVersion = str( params[ 'rollbackToVersion' ] )
except Exception, e:
raise WErr( 500, "Can't decode params: %s" % e )
rpcClient = RPCClient( gConfig.getValue( "/DIRAC/Configuration/MasterServer", "Configuration/Server" ) )
modCfg = Modificator( rpcClient )
retVal = modCfg.rollbackToVersion( rollbackVersion )
if retVal[ 'OK' ]:
return {"success":1, "op":"rollback", "version":rollbackVersion}
else:
return {"success":0, "op":"rollback", "message":retVal['Value']}
def __setCommiter( self ):
sessionData = self.getSessionData()
commiter = "%s@%s - %s" % ( sessionData["user"]["username"],
sessionData["user"]["group"],
Time.dateTime().strftime( "%Y-%m-%d %H:%M:%S" ) )
self.__configData[ 'cfgData' ].commiterId = commiter
def __history( self ):
if not self.__authorizeAction():
raise WErr( 500, "You are not authorized to commit configurations!! Bad boy!" )
rpcClient = RPCClient( gConfig.getValue( "/DIRAC/Configuration/MasterServer", "Configuration/Server" ) )
retVal = rpcClient.getCommitHistory()
if retVal[ 'OK' ]:
cDict = { 'numVersions' : 0, 'versions' : [] }
for entry in retVal[ 'Value' ]:
cDict[ 'numVersions' ] += 1
cDict[ 'versions' ].append( { 'version' : entry[1], 'commiter' : entry[0] } )
else:
raise WErr.fromSERROR( retVal )
return {"success":1, "op":"showshowHistory", "result":cDict}
def __download(self):
version = str( self.__configData['cfgData'].getCFG()["DIRAC"]["Configuration"]["Version"] )
configName = str( self.__configData['cfgData'].getCFG()["DIRAC"]["Configuration"]["Name"] )
fileName = "cs.%s.%s" % ( configName, version.replace( ":", "" ).replace( "-", "" ).replace( " ", "" ) )
return {"success":1, "op":"download", "result":self.__configData[ 'strCfgData' ],"fileName":fileName}
|
zmathe/WebAppDIRAC
|
WebApp/handler/ConfigurationManagerHandler.py
|
Python
|
gpl-3.0
| 25,480
|
[
"DIRAC"
] |
2544edc1d94fa0b935a2636b60b530755b0aa89b74f1c0475485373b94fdb89f
|
import matplotlib
matplotlib.use('Agg')
import sys
sys.path.append('../../')
import numpy as np
import scipy.stats
import matplotlib.pylab as plt
import os, sys
import geepee.aep_models as aep
import geepee.vfe_models as vfe
import geepee.ep_models as ep
import pdb
np.random.seed(42)
# We first define several utility functions
def func_true(x):
fx = -0.5*x + 5*np.cos(0.5*x)
return fx
def func(T, process_noise, obs_noise, xprev=None):
if xprev is None:
xprev = np.random.randn()
y = np.zeros([T, ])
x = np.zeros([T, ])
xtrue = np.zeros([T, ])
for t in range(T):
fx = -0.5*xprev + 5*np.cos(0.5*xprev)
xtrue[t] = fx
x[t] = fx + np.sqrt(process_noise)*np.random.randn()
xprev = x[t]
y[t] = 0.5*x[t] + np.sqrt(obs_noise)*np.random.randn()
return xtrue, x, y
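# Note (added): in state-space form the data generated above follow
#   x_t = -0.5 * x_{t-1} + 5 * cos(0.5 * x_{t-1}) + sqrt(process_noise) * e_t
#   y_t = 0.5 * x_t + sqrt(obs_noise) * v_t,        e_t, v_t ~ N(0, 1),
# which is the nonlinear transition the GP state-space models below try to recover.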
def plot_latent(model, y, plot_title=''):
# make prediction on some test inputs
N_test = 300
C = model.get_hypers()['C_emission'][0, 0]
x_test = np.linspace(-10, 8, N_test) / C
x_test = np.reshape(x_test, [N_test, 1])
if isinstance(model, aep.SGPSSM) or isinstance(model, vfe.SGPSSM):
zu = model.dyn_layer.zu
else:
zu = model.sgp_layer.zu
mu, vu = model.predict_f(zu)
# mu, Su = model.dyn_layer.mu, model.dyn_layer.Su
mf, vf = model.predict_f(x_test)
my, vy = model.predict_y(x_test)
# plot function
fig = plt.figure()
ax = fig.add_subplot(111)
# ax.plot(x_test[:,0], kink_true(x_test[:,0]), '-', color='k')
ax.plot(C*x_test[:,0], my[:,0], '-', color='r', label='y')
ax.fill_between(
C*x_test[:,0],
my[:,0] + 2*np.sqrt(vy[:, 0]),
my[:,0] - 2*np.sqrt(vy[:, 0]),
alpha=0.2, edgecolor='r', facecolor='r')
ax.plot(
y[0:model.N-1],
y[1:model.N],
'r+', alpha=0.5)
mx, vx = model.get_posterior_x()
ax.set_xlabel(r'$x_{t-1}$')
ax.set_ylabel(r'$x_{t}$')
plt.title(plot_title)
plt.savefig('/tmp/lincos_'+plot_title+'.png')
# generate a dataset from the lincos function above
T = 200
process_noise = 0.2
obs_noise = 0.1
(xtrue, x, y) = func(T, process_noise, obs_noise)
y_train = np.reshape(y, [y.shape[0], 1])
# init hypers
Dlatent = 1
Dobs = 1
M = 15
# create VFE model
np.random.seed(42)
model_vfe = vfe.SGPSSM(y_train, Dlatent, M,
lik='Gaussian', prior_mean=0, prior_var=1)
vfe_hypers = model_vfe.init_hypers(y_train)
model_vfe.update_hypers(vfe_hypers)
# optimise
# model_vfe.optimise(method='L-BFGS-B', maxiter=10000, reinit_hypers=False)
model_vfe.optimise(method='adam', adam_lr=0.001, maxiter=20000, reinit_hypers=False)
opt_hypers = model_vfe.get_hypers()
plot_latent(model_vfe, y, 'VFE')
alphas = [0.001, 0.05, 0.2, 0.5, 1.0]
# alphas = [0.05]
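# Note (added): alpha is the power-EP / alpha-divergence parameter of the AEP
# approximation; alpha -> 0 approaches the variational (VFE) solution and
# alpha = 1 corresponds to EP, so this loop sweeps between the two regimes.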
for alpha in alphas:
print 'alpha = %.3f' % alpha
# create AEP model
np.random.seed(42)
model_aep = aep.SGPSSM(y_train, Dlatent, M,
lik='Gaussian', prior_mean=0, prior_var=1)
aep_hypers = model_aep.init_hypers(y_train)
model_aep.update_hypers(aep_hypers)
# optimise
# model_aep.optimise(method='L-BFGS-B', alpha=alpha, maxiter=10000, reinit_hypers=False)
model_aep.optimise(method='adam', alpha=alpha, adam_lr=0.001, maxiter=20000, reinit_hypers=False)
opt_hypers = model_aep.get_hypers()
plot_latent(model_aep, y, 'AEP_%.3f'%alpha)
# # create EP model
# model_ep = ep.SGPSSM(y_train, Dlatent, M,
# lik='Gaussian', prior_mean=0, prior_var=1000)
# # init EP model using the AEP solution
# model_ep.update_hypers(opt_hypers)
# # run EP
# if alpha == 1.0:
# decay = 0.999
# parallel = True
# no_epochs = 200
# elif alpha == 0.001 or alpha == 0.05 or alpha ==0.2:
# decay = 0.5
# parallel = True
# no_epochs = 1000
# else:
# decay = 0.99
# parallel = True
# no_epochs = 500
# model_ep.inference(no_epochs=no_epochs, alpha=alpha, parallel=parallel, decay=decay)
# plot_latent(model_ep, y, 'PEP_%.3f'%alpha)
# # create EP model
# model_ep = ep.SGPSSM(y_train, Dlatent, M,
# lik='Gaussian', prior_mean=0, prior_var=1000)
# # init EP model using the AEP solution
# model_ep.update_hypers(opt_hypers)
# aep_sgp_layer = model_aep.dyn_layer
# Nm1 = aep_sgp_layer.N
# model_ep.sgp_layer.t1 = 1.0/Nm1 * np.tile(
# aep_sgp_layer.theta_2[np.newaxis, :, :], [Nm1, 1, 1])
# model_ep.sgp_layer.t2 = 1.0/Nm1 * np.tile(
# aep_sgp_layer.theta_1[np.newaxis, :, :, :], [Nm1, 1, 1, 1])
# model_ep.x_prev_1 = np.copy(model_aep.x_factor_1)
# model_ep.x_prev_2 = np.copy(model_aep.x_factor_2)
# model_ep.x_next_1 = np.copy(model_aep.x_factor_1)
# model_ep.x_next_2 = np.copy(model_aep.x_factor_2)
# model_ep.x_up_1 = np.copy(model_aep.x_factor_1)
# model_ep.x_up_2 = np.copy(model_aep.x_factor_2)
# model_ep.x_prev_1[0, :] = 0
# model_ep.x_prev_2[0, :] = 0
# model_ep.x_next_1[-1, :] = 0
# model_ep.x_next_2[-1, :] = 0
# # run EP
# if alpha == 1.0:
# decay = 0.999
# parallel = True
# no_epochs = 200
# elif alpha == 0.001 or alpha == 0.05 or alpha == 0.2:
# decay = 0.5
# parallel = True
# no_epochs = 1000
# else:
# decay = 0.99
# parallel = True
# no_epochs = 500
# model_ep.inference(no_epochs=no_epochs, alpha=alpha, parallel=parallel, decay=decay)
# plot_latent(model_ep, y, 'PEP_(AEP_init)_%.3f'%alpha)
|
thangbui/geepee
|
exps/gpssm/lin_cos_exp.py
|
Python
|
mit
| 5,539
|
[
"Gaussian"
] |
cefb8d3c461dd565dc15bf70f72027079ced842bcf4bb3bb87d10b66f1aa9e62
|
# Copyright 2002 by Katharine Lindner. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# python unittest framework
import unittest
import copy
# modules to be tested
from Bio.Crystal import Hetero, Chain, Crystal, CrystalError
class ChainTestCase(unittest.TestCase):
def setUp(self):
self.a = 'C A A C T A G G T C A C U A G G T C A G'
self.b = 'C T G A C C T A G T G A C C T A G T T G'
self.c = 'THR LYS LEU ASN GLY MET VAL LEU LEU CYS LYS VAL CYS GLY ASP'
self.d = 'THR LYS LEU ASN GLY MET VAL LEU LEU CYS LYS VAL CYS GLY ASP '
self.e = 'TYR LYS LEU ASN GLY MET VAL LEU LEU CYS LYS VAL CYS GLY ASP '
self.f = 'THR LYS LEU ASN GLY MET VAL LEU LEU CYS LYS VAL CYS GLY SER '
self.g = 'C A A C T A G G T C A C U A G G T C A T'
self.h = 'G A A C T A G G T C A C U A G G T C A G'
def testEquals(self):
first = Chain(self.a)
second = Chain(self.a)
self.assertEqual(first, second)
first = Chain(self.b)
second = Chain(self.b)
self.assertEqual(first, second)
first = Chain(self.c)
second = Chain(self.c)
self.assertEqual(first, second)
first = Chain(self.a)
second = Chain(self.g)
self.assertNotEqual(first, second)
first = Chain(self.a)
second = Chain(self.h)
self.assertNotEqual(first, second)
first = Chain(self.c)
second = Chain(self.e)
self.assertNotEqual(first, second)
first = Chain(self.c)
second = Chain(self.f)
self.assertNotEqual(first, second)
def testLen(self):
chain = Chain(self.a)
elements = self.a.strip().split()
num_elements = len(elements)
self.assertEqual(len(chain), num_elements)
chain = Chain(self.b)
elements = self.b.strip().split()
num_elements = len(elements)
self.assertEqual(len(chain), num_elements)
chain = Chain(self.c)
elements = self.c.strip().split()
num_elements = len(elements)
self.assertEqual(len(chain), num_elements)
def testAppend(self):
chain = Chain(self.a[:])
chain.append('U')
elements = self.a.strip().split()
num_elements = len(elements)
last_element = chain.data[-1]
self.assertEqual('u', last_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.a[:])
chain.append(Hetero('A'))
elements = self.a.strip().split()
num_elements = len(elements)
last_element = chain.data[-1]
self.assertEqual('a', last_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.b[:])
chain.append('t')
elements = self.b.strip().split()
num_elements = len(elements)
last_element = chain.data[-1]
self.assertEqual('t', last_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.b[:])
chain.append(Hetero('C'))
elements = self.b.strip().split()
num_elements = len(elements)
last_element = chain.data[-1]
self.assertEqual('c', last_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.c[:])
chain.append('ser')
elements = self.c.strip().split()
num_elements = len(elements)
last_element = chain.data[-1]
self.assertEqual('ser', last_element.data)
self.assertEqual(len(chain), num_elements + 1)
def testInsert(self):
chain = Chain(self.a[:])
i = 4
chain.insert(i, 'g')
elements = self.a.strip().split()
num_elements = len(elements)
target_element = chain.data[i]
self.assertEqual('g', target_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.a[:])
i = 0
chain.insert(i, 't')
elements = self.a.strip().split()
num_elements = len(elements)
target_element = chain.data[i]
self.assertEqual('t', target_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.b[:])
i = 9
chain.insert(i, Hetero('a'))
elements = self.a.strip().split()
num_elements = len(elements)
target_element = chain.data[i]
self.assertEqual('a', target_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.c[:])
i = 5
chain.insert(i, 'gln')
elements = self.c.strip().split()
num_elements = len(elements)
target_element = chain.data[i]
self.assertEqual('gln', target_element.data)
self.assertEqual(len(chain), num_elements + 1)
def testRemove(self):
chain = Chain(self.a[:])
elements = self.a.strip().split()
num_elements = len(elements)
num_a = chain.data.count(Hetero('a'))
chain.remove('a')
num_a_remaining = chain.data.count(Hetero('a'))
self.assertEqual(num_a_remaining, num_a - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.b[:])
elements = self.b.strip().split()
num_elements = len(elements)
num_b = chain.data.count(Hetero('t'))
chain.remove('t')
num_b_remaining = chain.data.count(Hetero('t'))
self.assertEqual(num_b_remaining, num_b - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.c[:])
elements = self.c.strip().split()
num_elements = len(elements)
num_leu = chain.data.count(Hetero('leu'))
chain.remove('leu')
num_leu_remaining = chain.data.count(Hetero('leu'))
self.assertEqual(num_leu_remaining, num_leu - 1)
self.assertEqual(len(chain), num_elements - 1)
def testCount(self):
chain = Chain(self.a[:])
num_a = chain.data.count(Hetero('a'))
self.assertEqual(chain.count('a'), num_a)
chain = Chain(self.b[:])
num_a = chain.data.count(Hetero('t'))
self.assertEqual(chain.count('t'), num_a)
chain = Chain(self.c[:])
num_a = chain.data.count(Hetero('leu'))
self.assertEqual(chain.count('leu'), num_a)
chain = Chain(self.c[:])
num_a = chain.data.count(Hetero('cys'))
self.assertEqual(chain.count('cys'), num_a)
def testIndex(self):
chain = Chain(self.a[:])
index_g = chain.data.index(Hetero('g'))
self.assertEqual(chain.index('g'), index_g)
chain = Chain(self.b[:])
index_c = chain.data.index(Hetero('c'))
self.assertEqual(chain.index('c'), index_c)
chain = Chain(self.c[:])
index_met = chain.data.index(Hetero('met'))
self.assertEqual(chain.index('met'), index_met)
def testGetItem(self):
chain = Chain(self.a[:])
element_3 = chain.data[3]
self.assertEqual(chain[3], element_3)
chain = Chain(self.a[:])
element_0 = chain.data[0]
self.assertEqual(chain[0], element_0)
chain = Chain(self.b[:])
element_7 = chain.data[7]
self.assertEqual(chain[7], element_7)
chain = Chain(self.b[:])
last_element = chain.data[-1]
self.assertEqual(chain[-1], last_element)
chain = Chain(self.c[:])
element_8 = chain.data[8]
self.assertEqual(chain[8], element_8)
def testSetItem(self):
chain = Chain(self.a[:])
chain[2] = 't'
element_2 = chain.data[2]
self.assertEqual(chain[2], element_2)
chain = Chain(self.a[:])
chain[0] = Hetero('U')
element_0 = chain.data[0]
self.assertEqual(chain[0], element_0)
chain = Chain(self.b[:])
chain[-1] = Hetero('c')
last_element = chain.data[-1]
self.assertEqual(chain[-1], last_element)
chain = Chain(self.b[:])
chain[1] = 'a'
element_1 = chain.data[1]
self.assertEqual(chain[1], element_1)
chain = Chain(self.c[:])
chain[5] = 'ser'
element_5 = chain.data[5]
self.assertEqual(chain[5], element_5)
def testDelItem(self):
chain = Chain(self.a[:])
elements = self.a.strip().split()
num_elements = len(elements)
num_t = chain.data.count(Hetero('t'))
del chain[4]
num_t_remaining = chain.data.count(Hetero('t'))
self.assertEqual(num_t_remaining, num_t - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.a[:])
elements = self.a.strip().split()
num_elements = len(elements)
num_u = chain.data.count(Hetero('u'))
del chain[12]
num_u_remaining = 0
self.assertEqual(num_u_remaining, num_u - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.b[:])
elements = self.b.strip().split()
num_elements = len(elements)
num_c = chain.data.count(Hetero('c'))
del chain[0]
num_c_remaining = chain.data.count(Hetero('c'))
self.assertEqual(num_c_remaining, num_c - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.b[:])
elements = self.b.strip().split()
num_elements = len(elements)
num_g = chain.data.count(Hetero('t'))
del chain[6]
num_g_remaining = chain.data.count(Hetero('t'))
self.assertEqual(num_g_remaining, num_g - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.c[:])
elements = self.c.strip().split()
num_elements = len(elements)
num_thr = chain.data.count(Hetero('thr'))
del chain[0]
num_thr_remaining = chain.data.count(Hetero('thr'))
self.assertEqual(num_thr_remaining, num_thr - 1)
self.assertEqual(len(chain), num_elements - 1)
def testGetSlice(self):
chain = Chain(self.a[:])
first = 0
last = len(chain)
slice = chain[:]
other = chain.data[:]
self.assertEqual(slice.data, other)
chain = Chain(self.a[:])
first = 0
last = 4
slice = chain[first:last]
other = chain.data[first:last]
self.assertEqual(slice.data, other)
chain = Chain(self.b[:])
first = 2
last = len(chain)
slice = chain[first:last]
other = chain.data[first:last]
self.assertEqual(slice.data, other)
chain = Chain(self.b[:])
first = -1
slice = chain[first:]
other = chain.data[first:]
self.assertEqual(slice.data, other)
chain = Chain(self.c[:])
first = 3
last = 7
slice = chain[first:last]
other = chain.data[first:last]
self.assertEqual(slice.data, other)
chain = Chain(self.c[:])
first = 3
last = -1
slice = chain[first:last]
other = chain.data[first:last]
self.assertEqual(slice.data, other)
def testSetSlice(self):
chain = Chain(self.a[:])
slice = 'G T C A G 5NC G C A T G G'
chain[:] = slice[4:7]
other = Chain(slice[4:7])
self.assertEqual(chain, other)
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
slice = 'MET ILE GLU ILE LYS ASP'
chain[2:5] = slice
other = Chain(old_chain.data[:2] + Chain(slice).data + old_chain.data[5:])
self.assertEqual(chain, other)
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
slice = 'CYS GLY ALA GLU CYS VAL TYR'
chain[7:] = slice
other = Chain(old_chain.data[:7] + Chain(slice).data)
self.assertEqual(chain, other)
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
slice = 'SER ASN GLU TRP ASP '
chain[:9] = slice
other = Chain(Chain(slice).data + old_chain.data[9:])
self.assertEqual(chain, other)
def testDelSlice(self):
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
del chain[3:8]
other = Chain(old_chain.data[:3] + old_chain.data[8:])
self.assertEqual(chain, other)
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
del chain[:4]
other = Chain(old_chain.data[4:])
self.assertEqual(chain, other)
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
del chain[9:]
other = Chain(old_chain.data[:9])
self.assertEqual(chain, other)
def testContains(self):
chain = Chain(self.c[:])
self.assertFalse('ser' in chain)
self.assertTrue('lys' in chain)
self.assertTrue('asp' in chain)
def testAdd(self):
texta = 'G U G G U C U G A U G A G G C C'
textb = 'G G C C G A A A C U C G U A A G A G U C A C C A C'
targeta = texta + Chain(textb)
targetb = Chain(texta) + textb
targetc = Chain(texta) + Chain(textb)
self.assertEqual(targeta, targetc)
self.assertEqual(targetb, targetc)
self.assertEqual(targeta, targetb)
self.assertEqual(len(targeta), len(Chain(texta)) + len(Chain(textb)))
targetd = Chain(texta)
targetd += textb
targete = Chain(texta)
targete += Chain(textb)
self.assertEqual(targetd, targetc)
self.assertEqual(targete, targetb)
class CrystalTestCase(unittest.TestCase):
def setUp(self):
self.crystal = Crystal({'a': 'T T G A C T C T C T T A A',
'b': Chain('G A G A G T C A'),
'c': 'T T G A C T C T C T T A A',
'd': Chain('G A G A G T C A')
})
def testLen(self):
self.assertEqual(len(self.crystal), len(self.crystal.data))
def testGetItem(self):
self.assertEqual(self.crystal['a'], self.crystal.data['a'])
def testSetItem(self):
target = copy.deepcopy(self.crystal)
e = 'MET ALA LEU THR ASN ALA GLN ILE LEU ALA VAL ILE ASP SER'
f = 'LEU GLY GLY GLY LEU GLN GLY THR LEU HIS CYS TYR GLU ILE PRO LEU'
target['e'] = e
target['f'] = Chain(f)
self.assertEqual(Chain(e), target['e'])
self.assertEqual(Chain(f), target['f'])
def testDelItem(self):
target = copy.deepcopy(self.crystal)
del target['b']
self.assertFalse('b' in target.data)
self.assertTrue('a' in target.data)
self.assertTrue('c' in target.data)
def testClear(self):
target = copy.deepcopy(self.crystal)
target.clear()
self.assertEqual(len(target.data), 0)
def testKeys(self):
self.assertEqual(list(self.crystal.keys()),
list(self.crystal.data.keys()))
def testValues(self):
self.assertEqual(list(self.crystal.values()),
list(self.crystal.data.values()))
def testItems(self):
self.assertEqual(list(self.crystal.items()),
list(self.crystal.data.items()))
def testHasKey(self):
self.assertTrue('b' in self.crystal)
self.assertTrue('c' in self.crystal)
self.assertFalse('z' in self.crystal)
class HeteroTestCase(unittest.TestCase):
def testInit(self):
self.assertRaises(CrystalError, Hetero, 'abcd')
self.assertRaises(CrystalError, Hetero, '')
self.assertRaises(CrystalError, Hetero, 'A@#')
self.assertRaises(CrystalError, Hetero, [])
self.assertRaises(CrystalError, Hetero, {})
def testLen(self):
bru = Hetero('bru')
self.assertEqual(len(bru), 3)
_14w = Hetero('14w')
self.assertEqual(len(_14w), 3)
a = Hetero('a')
self.assertEqual(len(a), 1)
ga = Hetero('ga')
self.assertEqual(len(ga), 2)
def testEquals(self):
u = Hetero('u')
u1 = Hetero('u')
self.assertEqual(u, u1)
self.assertEqual(u, Hetero('U'))
self.assertNotEqual(u, Hetero('u1'))
self.assertNotEqual(u, Hetero('x'))
gna = Hetero('gna')
self.assertEqual(gna, Hetero('gNA'))
self.assertEqual(gna, Hetero('GnA'))
self.assertNotEqual(gna, Hetero('gnb'))
self.assertNotEqual(gna, Hetero('na'))
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
updownlife/multipleK
|
dependencies/biopython-1.65/Tests/test_Crystal.py
|
Python
|
gpl-2.0
| 16,782
|
[
"Biopython",
"CRYSTAL"
] |
c5d1a892131039fea003774854871c624e369f4b7c71c5b2f29b263a416136bf
|
#!/usr/bin/env python
# Import peak data into the meTRN structures!
import sys
import time
import optparse
import general
import numpy
import pickle
import pdb
import metrn
import modencode
import copy
import os
print "Command:", " ".join(sys.argv)
print "Timestamp:", time.asctime(time.localtime())
""" define functions of internal use """
def uppify(indict):
output = dict()
for key in indict:
newkey = key.upper()
output[newkey] = indict[key]
return output
def main():
parser = optparse.OptionParser()
parser.add_option("--path", action = "store", type = "string", dest = "path", help = "Path from script to files")
parser.add_option("--mode", action = "store", type = "string", dest = "mode", help = "Type of operations to perform")
parser.add_option("--peaks", action = "store", type = "string", dest = "peaks", help = "Basename for target peaks", default="OFF")
parser.add_option("--source", action = "store", type = "string", dest = "source", help = "Source path or file", default="OFF")
parser.add_option("--infile", action = "store", type = "string", dest = "infile", help = "Input file for analysis", default="OFF")
parser.add_option("--organism", action = "store", type = "string", dest = "organism", help = "Target organism for operations...", default="OFF")
parser.add_option("--rank", action = "store", type = "string", dest = "rank", help = "Name peaks by dataset and rank?", default="ON")
parser.add_option("--label", action = "store", type = "string", dest = "label", help = "Type of labels to generate", default="factor.context")
parser.add_option("--method", action = "store", type = "string", dest = "method", help = "Keep method in dataset labels?", default="ON")
parser.add_option("--target", action = "store", type = "string", dest = "target", help = "Targets...", default="OFF")
parser.add_option("--include", action = "store", type = "string", dest = "include", help = "Targets to include", default="OFF")
parser.add_option("--exclude", action = "store", type = "string", dest = "exclude", help = "Targets to exclude", default="OFF")
parser.add_option("--allow", action = "store", type = "string", dest = "allow", help = "Targets to allow, despite exclusion", default="OFF")
parser.add_option("--rename", action = "store", type = "string", dest = "rename", help = "Targets to rename. Comma-separated list of 'target:replacement' pairs to search and replace.", default="OFF")
parser.add_option("--nonredundant", action = "store", type = "string", dest = "nonredundant", help = "Filter redundants?", default="OFF")
parser.add_option("--parameters", action = "store", type = "string", dest = "parameters", help = "Variable parameters...", default="OFF")
parser.add_option("--server", action = "store", type = "string", dest = "server", help = "Server variables...", default="OFF")
parser.add_option("--fixed", action = "store", type = "string", dest = "fixed", help = "Should a fixed files be used?", default="OFF")
parser.add_option("--cutChr", action = "store", type = "string", dest = "cutChr", help = "Should first 3 letters (chr) be removed?", default="OFF")
parser.add_option("--filterChr", action = "store", type = "string", dest = "filterChr", help = "Remove peaks in these chromosomes", default="OFF")
parser.add_option("--idrSource", action = "store", type = "string", dest = "idrSource", help = "Take peaks from idr/final path?", default="ON")
parser.add_option("--reformat", action = "store", type = "string", dest = "reformat", help = "Should peaks be re-formatted?", default="OFF")
(option, args) = parser.parse_args()
# import paths:
path_dict = modencode.configBuild(option.path + "/input/" + "configure_path.txt")
# specify input and output paths:
inpath = path_dict["input"]
extraspath = path_dict["extras"]
pythonpath = path_dict["python"]
downloadpath = path_dict["download"]
fastqpath = path_dict["fastq"]
bowtiepath = path_dict["bowtie"]
bwapath = path_dict["bwa"]
macspath = path_dict["macs"]
memepath = path_dict["meme"]
idrpath = path_dict["idr"]
igvpath = path_dict["igv"]
testpath = path_dict["test"]
processingpath = path_dict["processing"]
annotationspath = path_dict["annotations"]
bindingpath = path_dict["binding"]
networkpath = path_dict["network"]
peakspath = path_dict["peaks"]
gopath = path_dict["go"]
hotpath = path_dict["hot"]
cellspath = path_dict["cells"]
neuronspath = path_dict["neurons"]
# standardize paths for analysis:
peakspath = peakspath + option.peaks + "/"
alignerpath = bwapath
indexpath = alignerpath + "index/"
alignmentpath = alignerpath + "alignment/"
qcfilterpath = alignerpath + "qcfilter/"
qcmergepath = alignerpath + "qcmerge/"
# import configuration dictionaries:
source_dict = modencode.configBuild(inpath + "configure_source.txt")
method_dict = modencode.configBuild(inpath + "configure_method.txt")
context_dict = modencode.configBuild(inpath + "configure_context.txt")
# define organism parameters:
if option.organism == "hs" or option.organism == "h.sapiens":
organismTag = "hs"
elif option.organism == "mm" or option.organism == "m.musculus":
organismTag = "mm"
elif option.organism == "ce" or option.organism == "c.elegans":
organismTag = "ce"
elif option.organism == "dm" or option.organism == "d.melanogaster":
organismTag = "dm"
# import peaks mode:
elif option.mode == "download.peaks":
print
# define destination:
sourcepath = idrpath + "final/" + option.source + "/"
general.pathGenerator(sourcepath)
# launch command:
print "Downloading peaks..."
command = "scp " + option.server + ":" + metrn.sharedSCG + option.parameters + " " + sourcepath
os.system(command)
print
# import peaks mode:
elif option.mode == "import.peaks":
print
# define input path:
if option.idrSource == "ON":
sourcepath = idrpath + "final/" + option.source + "/"
else:
sourcepath = option.path + "/" + option.source + "/"
sourcepath = sourcepath.replace("//", "/")
# define output path:
peakspath = idrpath + "peaks/" + option.peaks + "/"
general.pathGenerator(peakspath)
# gather peak files and transfer to the IDR peaks folder:
print "Gathering peaks into peak folder..."
infiles = os.listdir(sourcepath)
for infile in infiles:
# generate outfile name:
outfile = copy.deepcopy(infile)
# rename elements if necessary:
if option.rename != "OFF":
for scheme in option.rename.split(","):
target, replace = scheme.split(":")
outfile = outfile.replace(target, replace)
dataset = outfile.replace("_peaks.bed", "")
organism, strain, factor, context, institute, method = metrn.labelComponents(dataset)
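		# Note (added): dataset file names are assumed to follow the
		# organism_strain_factor_context_institute_method naming convention that
		# metrn.labelComponents() parses; the components are re-joined with "_"
		# when renaming peaks below.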
print "\t".join([organism, strain, factor, context, institute, method])
#print infile
# rank-name peaks?
if option.rank == "ON":
i = 1
f_output = open(peakspath + outfile,"w")
inlines = open(sourcepath + infile).readlines()
for inline in inlines:
initems = inline.strip().split("\t")
if option.fixed == "ON" and len(initems) == 3:
chrm, start, stop = inline.strip().split("\t")
peak, score, strand, signal, pvalue, qvalue, point = "Peak_" + str(i), "0", ".", "0", "0", "0", "0"
elif option.reformat == "ON" and len(initems) == 5:
chrm, start, stop, name, score = inline.strip().split("\t")
peak, score, strand, signal, pvalue, qvalue, point = "Peak_" + str(i), score, ".", "0", "0", "0", "0"
else:
chrm, start, stop, peak, score, strand, signal, pvalue, qvalue, point = inline.strip().split("\t")
if option.method == "ON":
peak = dataset + ":P" + str(i)
else:
peak = "_".join([organism, strain, factor, context, institute]) + ":P" + str(i)
if option.cutChr == "ON":
chrm = chrm.lstrip("chr")
if option.filterChr == "OFF" or not chrm in option.filterChr.split(","):
print >>f_output, "\t".join([chrm, start, stop, peak, score, strand, signal, pvalue, qvalue, point])
i += 1
f_output.close()
# simply copy file...
else:
print "Transferring:", infile
command = "cp " + sourcepath + infile + " " + peakspath + outfile
os.system(command)
print
# select peaks mode:
if option.mode == "select.peaks":
print
# define input path and load input file names:
sourcepath = idrpath + "peaks/" + option.source + "/"
infiles = os.listdir(sourcepath)
# define output path:
peakspath = idrpath + "peaks/" + option.peaks + "/"
general.pathGenerator(peakspath)
# select dataset files that match the desired criteria:
i, j = 0, 0
print "Selecting compliant datasets..."
selections = list()
for infile in infiles:
inclusions, exclusions, allowances = list(), list(), list()
if option.include != "OFF":
for target in option.include.split(","):
if target in infile:
inclusions.append(target)
if option.exclude != "OFF":
for exclusion in option.exclude.split(","):
if exclusion in infile:
exclusions.append(exclusion)
if option.allow != "OFF":
for allowance in option.allow.split(","):
if allowance in infile:
allowances.append(allowance)
if inclusions != list() or len(inclusions) == len(option.include.split(";")) or option.include == "OFF":
if exclusions == list() or option.exclude == "OFF" or allowances != list():
selections.append(infile)
i += 1
# remove redundant datasets if necessary:
m, n = 0, 0
if option.nonredundant == "ON":
print "Filtering redundant datasets..."
dataset_dict = dict()
for infile in selections:
label = metrn.labelGenerator(option.label, mode="label", dataset=infile)
if not label in dataset_dict:
dataset_dict[label] = dict()
dataset_dict[label][infile] = general.countLines(sourcepath + infile)
selections = list()
for label in dataset_dict:
m += 1
sources = general.valuesort(dataset_dict[label])
sources.reverse()
if len(sources) > 1:
print label, len(sources), sources
n += 1
selections.append(sources[0])
print
# transfer peak files that match the desired targets:
print "Transferring selected datasets..."
for infile in selections:
command = "cp " + sourcepath + infile + " " + peakspath + infile
os.system(command)
j += 1
print "Transferred selections:", j
print "Redundancies fixed:", n
print
# transfer peaks mode (from idr/peaks to data/peaks):
if option.mode == "transfer.peaks":
print
# define input path:
sourcepath = idrpath + "peaks/" + option.source + "/"
# define output path:
general.pathGenerator(peakspath)
# gather peak files and transfer to the IDR peaks folder:
print "Transferring peaks to data folder..."
infiles = os.listdir(sourcepath)
for infile in infiles:
print "Transferring:", infile
command = "cp " + sourcepath + infile + " " + peakspath + infile
os.system(command)
print
if __name__ == "__main__":
main()
print "Completed:", time.asctime(time.localtime())
|
claraya/meTRN
|
python/dataImporter.py
|
Python
|
mit
| 11,052
|
[
"BWA",
"Bowtie"
] |
aebc049b5640c09666bec4f84410de6e6a82ceb2b40151b022b333838ea122ed
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Base class for a wrapper over QWebView/QWebEngineView."""
import enum
import itertools
import functools
from typing import (cast, TYPE_CHECKING, Any, Callable, Iterable, List, Optional,
Sequence, Set, Type, Union)
import attr
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, QUrl, QObject, QSizeF, Qt,
QEvent, QPoint)
from PyQt5.QtGui import QKeyEvent, QIcon
from PyQt5.QtWidgets import QWidget, QApplication, QDialog
from PyQt5.QtPrintSupport import QPrintDialog, QPrinter
from PyQt5.QtNetwork import QNetworkAccessManager
if TYPE_CHECKING:
from PyQt5.QtWebKit import QWebHistory
from PyQt5.QtWebKitWidgets import QWebPage
from PyQt5.QtWebEngineWidgets import QWebEngineHistory, QWebEnginePage
import pygments
import pygments.lexers
import pygments.formatters
from qutebrowser.keyinput import modeman
from qutebrowser.config import config
from qutebrowser.utils import (utils, objreg, usertypes, log, qtutils,
urlutils, message)
from qutebrowser.misc import miscwidgets, objects, sessions
from qutebrowser.browser import eventfilter, inspector
from qutebrowser.qt import sip
if TYPE_CHECKING:
from qutebrowser.browser import webelem
from qutebrowser.browser.inspector import AbstractWebInspector
tab_id_gen = itertools.count(0)
def create(win_id: int,
private: bool,
parent: QWidget = None) -> 'AbstractTab':
"""Get a QtWebKit/QtWebEngine tab object.
Args:
win_id: The window ID where the tab will be shown.
private: Whether the tab is a private/off the record tab.
parent: The Qt parent to set.
"""
# Importing modules here so we don't depend on QtWebEngine without the
# argument and to avoid circular imports.
mode_manager = modeman.instance(win_id)
if objects.backend == usertypes.Backend.QtWebEngine:
from qutebrowser.browser.webengine import webenginetab
tab_class: Type[AbstractTab] = webenginetab.WebEngineTab
elif objects.backend == usertypes.Backend.QtWebKit:
from qutebrowser.browser.webkit import webkittab
tab_class = webkittab.WebKitTab
else:
raise utils.Unreachable(objects.backend)
return tab_class(win_id=win_id, mode_manager=mode_manager, private=private,
parent=parent)
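# --- Illustrative sketch (added; not part of the original qutebrowser code) --
# A hedged example of the intended call pattern for create(). It assumes the
# window with the given ID already exists and its mode manager is registered,
# which normally happens during window setup.
def _example_create_tab(win_id: int, parent: QWidget = None) -> 'AbstractTab':
    """Hypothetical helper, only illustrating how create() is meant to be used."""
    tab = create(win_id=win_id, private=False, parent=parent)
    # load_url() is provided by the backend-specific subclass returned above.
    tab.load_url(QUrl('https://example.org'))
    return tab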
def init() -> None:
"""Initialize backend-specific modules."""
if objects.backend == usertypes.Backend.QtWebEngine:
from qutebrowser.browser.webengine import webenginetab
webenginetab.init()
return
assert objects.backend == usertypes.Backend.QtWebKit, objects.backend
class WebTabError(Exception):
"""Base class for various errors."""
class UnsupportedOperationError(WebTabError):
"""Raised when an operation is not supported with the given backend."""
class TerminationStatus(enum.Enum):
"""How a QtWebEngine renderer process terminated.
Also see QWebEnginePage::RenderProcessTerminationStatus
"""
#: Unknown render process status value gotten from Qt.
unknown = -1
#: The render process terminated normally.
normal = 0
    #: The render process terminated with a non-zero exit status.
abnormal = 1
#: The render process crashed, for example because of a segmentation fault.
crashed = 2
#: The render process was killed, for example by SIGKILL or task manager kill.
killed = 3
@attr.s
class TabData:
"""A simple namespace with a fixed set of attributes.
Attributes:
keep_icon: Whether the (e.g. cloned) icon should not be cleared on page
load.
inspector: The QWebInspector used for this webview.
viewing_source: Set if we're currently showing a source view.
Only used when sources are shown via pygments.
open_target: Where to open the next link.
Only used for QtWebKit.
override_target: Override for open_target for fake clicks (like hints).
Only used for QtWebKit.
pinned: Flag to pin the tab.
fullscreen: Whether the tab has a video shown fullscreen currently.
netrc_used: Whether netrc authentication was performed.
input_mode: current input mode for the tab.
splitter: InspectorSplitter used to show inspector inside the tab.
"""
keep_icon: bool = attr.ib(False)
viewing_source: bool = attr.ib(False)
inspector: Optional['AbstractWebInspector'] = attr.ib(None)
open_target: usertypes.ClickTarget = attr.ib(usertypes.ClickTarget.normal)
override_target: Optional[usertypes.ClickTarget] = attr.ib(None)
pinned: bool = attr.ib(False)
fullscreen: bool = attr.ib(False)
netrc_used: bool = attr.ib(False)
input_mode: usertypes.KeyMode = attr.ib(usertypes.KeyMode.normal)
last_navigation: usertypes.NavigationRequest = attr.ib(None)
splitter: miscwidgets.InspectorSplitter = attr.ib(None)
def should_show_icon(self) -> bool:
return (config.val.tabs.favicons.show == 'always' or
config.val.tabs.favicons.show == 'pinned' and self.pinned)
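    # Added note (not original code): ``and`` binds tighter than ``or`` above,
    # so should_show_icon() is equivalent to
    #     show == 'always' or (show == 'pinned' and self.pinned)
    # i.e. a pinned tab keeps its favicon when the setting is 'pinned'.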
class AbstractAction:
"""Attribute ``action`` of AbstractTab for Qt WebActions."""
action_class: Type[Union['QWebPage', 'QWebEnginePage']]
action_base: Type[Union['QWebPage.WebAction', 'QWebEnginePage.WebAction']]
def __init__(self, tab: 'AbstractTab') -> None:
self._widget = cast(QWidget, None)
self._tab = tab
def exit_fullscreen(self) -> None:
"""Exit the fullscreen mode."""
raise NotImplementedError
def save_page(self) -> None:
"""Save the current page."""
raise NotImplementedError
def run_string(self, name: str) -> None:
"""Run a webaction based on its name."""
member = getattr(self.action_class, name, None)
if not isinstance(member, self.action_base):
raise WebTabError("{} is not a valid web action!".format(name))
self._widget.triggerPageAction(member)
def show_source(
self,
pygments: bool = False # pylint: disable=redefined-outer-name
) -> None:
"""Show the source of the current page in a new tab."""
raise NotImplementedError
def _show_source_pygments(self) -> None:
def show_source_cb(source: str) -> None:
"""Show source as soon as it's ready."""
# WORKAROUND for https://github.com/PyCQA/pylint/issues/491
# pylint: disable=no-member
lexer = pygments.lexers.HtmlLexer()
formatter = pygments.formatters.HtmlFormatter(
full=True, linenos='table')
# pylint: enable=no-member
highlighted = pygments.highlight(source, lexer, formatter)
tb = objreg.get('tabbed-browser', scope='window',
window=self._tab.win_id)
new_tab = tb.tabopen(background=False, related=True)
new_tab.set_html(highlighted, self._tab.url())
new_tab.data.viewing_source = True
self._tab.dump_async(show_source_cb)
class AbstractPrinting:
"""Attribute ``printing`` of AbstractTab for printing the page."""
def __init__(self, tab: 'AbstractTab') -> None:
self._widget = cast(QWidget, None)
self._tab = tab
def check_pdf_support(self) -> None:
"""Check whether writing to PDFs is supported.
If it's not supported (by the current Qt version), a WebTabError is
raised.
"""
raise NotImplementedError
def check_preview_support(self) -> None:
"""Check whether showing a print preview is supported.
If it's not supported (by the current Qt version), a WebTabError is
raised.
"""
raise NotImplementedError
def to_pdf(self, filename: str) -> bool:
"""Print the tab to a PDF with the given filename."""
raise NotImplementedError
def to_printer(self, printer: QPrinter,
callback: Callable[[bool], None] = None) -> None:
"""Print the tab.
Args:
printer: The QPrinter to print to.
callback: Called with a boolean
(True if printing succeeded, False otherwise)
"""
raise NotImplementedError
def show_dialog(self) -> None:
"""Print with a QPrintDialog."""
def print_callback(ok: bool) -> None:
"""Called when printing finished."""
if not ok:
message.error("Printing failed!")
diag.deleteLater()
def do_print() -> None:
"""Called when the dialog was closed."""
self.to_printer(diag.printer(), print_callback)
diag = QPrintDialog(self._tab)
if utils.is_mac:
# For some reason we get a segfault when using open() on macOS
ret = diag.exec_()
if ret == QDialog.Accepted:
do_print()
else:
diag.open(do_print)
class AbstractSearch(QObject):
"""Attribute ``search`` of AbstractTab for doing searches.
Attributes:
text: The last thing this view was searched for.
search_displayed: Whether we're currently displaying search results in
this view.
_flags: The flags of the last search (needs to be set by subclasses).
_widget: The underlying WebView widget.
"""
#: Signal emitted when a search was finished
#: (True if the text was found, False otherwise)
finished = pyqtSignal(bool)
#: Signal emitted when an existing search was cleared.
cleared = pyqtSignal()
_Callback = Callable[[bool], None]
def __init__(self, tab: 'AbstractTab', parent: QWidget = None):
super().__init__(parent)
self._tab = tab
self._widget = cast(QWidget, None)
self.text: Optional[str] = None
self.search_displayed = False
def _is_case_sensitive(self, ignore_case: usertypes.IgnoreCase) -> bool:
"""Check if case-sensitivity should be used.
This assumes self.text is already set properly.
Arguments:
ignore_case: The ignore_case value from the config.
"""
assert self.text is not None
mapping = {
usertypes.IgnoreCase.smart: not self.text.islower(),
usertypes.IgnoreCase.never: True,
usertypes.IgnoreCase.always: False,
}
return mapping[ignore_case]
def search(self, text: str, *,
ignore_case: usertypes.IgnoreCase = usertypes.IgnoreCase.never,
reverse: bool = False,
wrap: bool = True,
result_cb: _Callback = None) -> None:
"""Find the given text on the page.
Args:
text: The text to search for.
ignore_case: Search case-insensitively.
reverse: Reverse search direction.
wrap: Allow wrapping at the top or bottom of the page.
result_cb: Called with a bool indicating whether a match was found.
"""
raise NotImplementedError
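    # Hedged usage sketch (added; not in the original file): a backend subclass
    # implementing search() is typically driven along the lines of
    #     tab.search.search('needle',
    #                       ignore_case=usertypes.IgnoreCase.smart,
    #                       result_cb=lambda found: ...)
    # followed by next_result()/prev_result() to cycle through the matches.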
def clear(self) -> None:
"""Clear the current search."""
raise NotImplementedError
def prev_result(self, *, result_cb: _Callback = None) -> None:
"""Go to the previous result of the current search.
Args:
result_cb: Called with a bool indicating whether a match was found.
"""
raise NotImplementedError
def next_result(self, *, result_cb: _Callback = None) -> None:
"""Go to the next result of the current search.
Args:
result_cb: Called with a bool indicating whether a match was found.
"""
raise NotImplementedError
class AbstractZoom(QObject):
"""Attribute ``zoom`` of AbstractTab for controlling zoom."""
def __init__(self, tab: 'AbstractTab', parent: QWidget = None) -> None:
super().__init__(parent)
self._tab = tab
self._widget = cast(QWidget, None)
# Whether zoom was changed from the default.
self._default_zoom_changed = False
self._init_neighborlist()
config.instance.changed.connect(self._on_config_changed)
self._zoom_factor = float(config.val.zoom.default) / 100
@pyqtSlot(str)
def _on_config_changed(self, option: str) -> None:
if option in ['zoom.levels', 'zoom.default']:
if not self._default_zoom_changed:
factor = float(config.val.zoom.default) / 100
self.set_factor(factor)
self._init_neighborlist()
def _init_neighborlist(self) -> None:
"""Initialize self._neighborlist.
It is a NeighborList with the zoom levels."""
levels = config.val.zoom.levels
self._neighborlist: usertypes.NeighborList[float] = usertypes.NeighborList(
levels, mode=usertypes.NeighborList.Modes.edge)
self._neighborlist.fuzzyval = config.val.zoom.default
def apply_offset(self, offset: int) -> float:
"""Increase/Decrease the zoom level by the given offset.
Args:
offset: The offset in the zoom level list.
Return:
The new zoom level.
"""
level = self._neighborlist.getitem(offset)
self.set_factor(float(level) / 100, fuzzyval=False)
return level
def _set_factor_internal(self, factor: float) -> None:
raise NotImplementedError
def set_factor(self, factor: float, *, fuzzyval: bool = True) -> None:
"""Zoom to a given zoom factor.
Args:
factor: The zoom factor as float.
fuzzyval: Whether to set the NeighborLists fuzzyval.
"""
if fuzzyval:
self._neighborlist.fuzzyval = int(factor * 100)
if factor < 0:
raise ValueError("Can't zoom to factor {}!".format(factor))
default_zoom_factor = float(config.val.zoom.default) / 100
self._default_zoom_changed = (factor != default_zoom_factor)
self._zoom_factor = factor
self._set_factor_internal(factor)
def factor(self) -> float:
return self._zoom_factor
def apply_default(self) -> None:
self._set_factor_internal(float(config.val.zoom.default) / 100)
def reapply(self) -> None:
self._set_factor_internal(self._zoom_factor)
class SelectionState(enum.Enum):
"""Possible states of selection in caret mode.
NOTE: Names need to line up with SelectionState in caret.js!
"""
none = enum.auto()
normal = enum.auto()
line = enum.auto()
class AbstractCaret(QObject):
"""Attribute ``caret`` of AbstractTab for caret browsing."""
#: Signal emitted when the selection was toggled.
selection_toggled = pyqtSignal(SelectionState)
#: Emitted when a ``follow_selection`` action is done.
follow_selected_done = pyqtSignal()
def __init__(self,
tab: 'AbstractTab',
mode_manager: modeman.ModeManager,
parent: QWidget = None) -> None:
super().__init__(parent)
self._widget = cast(QWidget, None)
self._mode_manager = mode_manager
mode_manager.entered.connect(self._on_mode_entered)
mode_manager.left.connect(self._on_mode_left)
self._tab = tab
def _on_mode_entered(self, mode: usertypes.KeyMode) -> None:
raise NotImplementedError
def _on_mode_left(self, mode: usertypes.KeyMode) -> None:
raise NotImplementedError
def move_to_next_line(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_prev_line(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_next_char(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_prev_char(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_end_of_word(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_next_word(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_prev_word(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_start_of_line(self) -> None:
raise NotImplementedError
def move_to_end_of_line(self) -> None:
raise NotImplementedError
def move_to_start_of_next_block(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_start_of_prev_block(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_end_of_next_block(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_end_of_prev_block(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_start_of_document(self) -> None:
raise NotImplementedError
def move_to_end_of_document(self) -> None:
raise NotImplementedError
def toggle_selection(self, line: bool = False) -> None:
raise NotImplementedError
def drop_selection(self) -> None:
raise NotImplementedError
def selection(self, callback: Callable[[str], None]) -> None:
raise NotImplementedError
def reverse_selection(self) -> None:
raise NotImplementedError
def _follow_enter(self, tab: bool) -> None:
"""Follow a link by faking an enter press."""
if tab:
self._tab.fake_key_press(Qt.Key_Enter, modifier=Qt.ControlModifier)
else:
self._tab.fake_key_press(Qt.Key_Enter)
def follow_selected(self, *, tab: bool = False) -> None:
raise NotImplementedError
class AbstractScroller(QObject):
"""Attribute ``scroller`` of AbstractTab to manage scroll position."""
#: Signal emitted when the scroll position changed (int, int)
perc_changed = pyqtSignal(int, int)
#: Signal emitted before the user requested a jump.
#: Used to set the special ' mark so the user can return.
before_jump_requested = pyqtSignal()
def __init__(self, tab: 'AbstractTab', parent: QWidget = None):
super().__init__(parent)
self._tab = tab
self._widget = cast(QWidget, None)
if 'log-scroll-pos' in objects.debug_flags:
self.perc_changed.connect(self._log_scroll_pos_change)
@pyqtSlot()
def _log_scroll_pos_change(self) -> None:
log.webview.vdebug( # type: ignore[attr-defined]
"Scroll position changed to {}".format(self.pos_px()))
def _init_widget(self, widget: QWidget) -> None:
self._widget = widget
def pos_px(self) -> int:
raise NotImplementedError
def pos_perc(self) -> int:
raise NotImplementedError
def to_perc(self, x: int = None, y: int = None) -> None:
raise NotImplementedError
def to_point(self, point: QPoint) -> None:
raise NotImplementedError
def to_anchor(self, name: str) -> None:
raise NotImplementedError
def delta(self, x: int = 0, y: int = 0) -> None:
raise NotImplementedError
def delta_page(self, x: float = 0, y: float = 0) -> None:
raise NotImplementedError
def up(self, count: int = 1) -> None:
raise NotImplementedError
def down(self, count: int = 1) -> None:
raise NotImplementedError
def left(self, count: int = 1) -> None:
raise NotImplementedError
def right(self, count: int = 1) -> None:
raise NotImplementedError
def top(self) -> None:
raise NotImplementedError
def bottom(self) -> None:
raise NotImplementedError
def page_up(self, count: int = 1) -> None:
raise NotImplementedError
def page_down(self, count: int = 1) -> None:
raise NotImplementedError
def at_top(self) -> bool:
raise NotImplementedError
def at_bottom(self) -> bool:
raise NotImplementedError
class AbstractHistoryPrivate:
"""Private API related to the history."""
def serialize(self) -> bytes:
"""Serialize into an opaque format understood by self.deserialize."""
raise NotImplementedError
def deserialize(self, data: bytes) -> None:
"""Deserialize from a format produced by self.serialize."""
raise NotImplementedError
def load_items(self, items: Sequence) -> None:
"""Deserialize from a list of WebHistoryItems."""
raise NotImplementedError
class AbstractHistory:
"""The history attribute of a AbstractTab."""
def __init__(self, tab: 'AbstractTab') -> None:
self._tab = tab
self._history = cast(Union['QWebHistory', 'QWebEngineHistory'], None)
self.private_api = AbstractHistoryPrivate()
def __len__(self) -> int:
raise NotImplementedError
def __iter__(self) -> Iterable:
raise NotImplementedError
def _check_count(self, count: int) -> None:
"""Check whether the count is positive."""
if count < 0:
raise WebTabError("count needs to be positive!")
def current_idx(self) -> int:
raise NotImplementedError
def back(self, count: int = 1) -> None:
"""Go back in the tab's history."""
self._check_count(count)
idx = self.current_idx() - count
if idx >= 0:
self._go_to_item(self._item_at(idx))
else:
self._go_to_item(self._item_at(0))
raise WebTabError("At beginning of history.")
def forward(self, count: int = 1) -> None:
"""Go forward in the tab's history."""
self._check_count(count)
idx = self.current_idx() + count
if idx < len(self):
self._go_to_item(self._item_at(idx))
else:
self._go_to_item(self._item_at(len(self) - 1))
raise WebTabError("At end of history.")
def can_go_back(self) -> bool:
raise NotImplementedError
def can_go_forward(self) -> bool:
raise NotImplementedError
def _item_at(self, i: int) -> Any:
raise NotImplementedError
def _go_to_item(self, item: Any) -> None:
raise NotImplementedError
def back_items(self) -> List[Any]:
raise NotImplementedError
def forward_items(self) -> List[Any]:
raise NotImplementedError
class AbstractElements:
"""Finding and handling of elements on the page."""
_MultiCallback = Callable[[Sequence['webelem.AbstractWebElement']], None]
_SingleCallback = Callable[[Optional['webelem.AbstractWebElement']], None]
_ErrorCallback = Callable[[Exception], None]
def __init__(self, tab: 'AbstractTab') -> None:
self._widget = cast(QWidget, None)
self._tab = tab
def find_css(self, selector: str,
callback: _MultiCallback,
error_cb: _ErrorCallback, *,
only_visible: bool = False) -> None:
"""Find all HTML elements matching a given selector async.
If there's an error, the callback is called with a webelem.Error
instance.
Args:
callback: The callback to be called when the search finished.
error_cb: The callback to be called when an error occurred.
selector: The CSS selector to search for.
only_visible: Only show elements which are visible on screen.
"""
raise NotImplementedError
def find_id(self, elem_id: str, callback: _SingleCallback) -> None:
"""Find the HTML element with the given ID async.
Args:
callback: The callback to be called when the search finished.
Called with a WebEngineElement or None.
elem_id: The ID to search for.
"""
raise NotImplementedError
def find_focused(self, callback: _SingleCallback) -> None:
"""Find the focused element on the page async.
Args:
callback: The callback to be called when the search finished.
Called with a WebEngineElement or None.
"""
raise NotImplementedError
def find_at_pos(self, pos: QPoint, callback: _SingleCallback) -> None:
"""Find the element at the given position async.
This is also called "hit test" elsewhere.
Args:
pos: The QPoint to get the element for.
callback: The callback to be called when the search finished.
Called with a WebEngineElement or None.
"""
raise NotImplementedError
class AbstractAudio(QObject):
"""Handling of audio/muting for this tab."""
muted_changed = pyqtSignal(bool)
recently_audible_changed = pyqtSignal(bool)
def __init__(self, tab: 'AbstractTab', parent: QWidget = None) -> None:
super().__init__(parent)
self._widget = cast(QWidget, None)
self._tab = tab
def set_muted(self, muted: bool, override: bool = False) -> None:
"""Set this tab as muted or not.
Arguments:
override: If set to True, muting/unmuting was done manually and
overrides future automatic mute/unmute changes based on
the URL.
"""
raise NotImplementedError
def is_muted(self) -> bool:
raise NotImplementedError
def is_recently_audible(self) -> bool:
"""Whether this tab has had audio playing recently."""
raise NotImplementedError
class AbstractTabPrivate:
"""Tab-related methods which are only needed in the core.
Those methods are not part of the API which is exposed to extensions, and
should ideally be removed at some point in the future.
"""
def __init__(self, mode_manager: modeman.ModeManager,
tab: 'AbstractTab') -> None:
self._widget = cast(QWidget, None)
self._tab = tab
self._mode_manager = mode_manager
def event_target(self) -> QWidget:
"""Return the widget events should be sent to."""
raise NotImplementedError
def handle_auto_insert_mode(self, ok: bool) -> None:
"""Handle `input.insert_mode.auto_load` after loading finished."""
if not ok or not config.cache['input.insert_mode.auto_load']:
return
cur_mode = self._mode_manager.mode
if cur_mode == usertypes.KeyMode.insert:
return
def _auto_insert_mode_cb(
elem: Optional['webelem.AbstractWebElement']
) -> None:
"""Called from JS after finding the focused element."""
if elem is None:
log.webview.debug("No focused element!")
return
if elem.is_editable():
modeman.enter(self._tab.win_id, usertypes.KeyMode.insert,
'load finished', only_if_normal=True)
self._tab.elements.find_focused(_auto_insert_mode_cb)
def clear_ssl_errors(self) -> None:
raise NotImplementedError
def networkaccessmanager(self) -> Optional[QNetworkAccessManager]:
"""Get the QNetworkAccessManager for this tab.
This is only implemented for QtWebKit.
For QtWebEngine, always returns None.
"""
raise NotImplementedError
def shutdown(self) -> None:
raise NotImplementedError
def run_js_sync(self, code: str) -> None:
"""Run javascript sync.
Result will be returned when running JS is complete.
This is only implemented for QtWebKit.
For QtWebEngine, always raises UnsupportedOperationError.
"""
raise NotImplementedError
def _recreate_inspector(self) -> None:
"""Recreate the inspector when detached to a window.
This is needed to circumvent a QtWebEngine bug (which wasn't
investigated further) which sometimes results in the window not
appearing anymore.
"""
self._tab.data.inspector = None
self.toggle_inspector(inspector.Position.window)
def toggle_inspector(self, position: inspector.Position) -> None:
"""Show/hide (and if needed, create) the web inspector for this tab."""
tabdata = self._tab.data
if tabdata.inspector is None:
tabdata.inspector = inspector.create(
splitter=tabdata.splitter,
win_id=self._tab.win_id)
self._tab.shutting_down.connect(tabdata.inspector.shutdown)
tabdata.inspector.recreate.connect(self._recreate_inspector)
tabdata.inspector.inspect(self._widget.page())
tabdata.inspector.set_position(position)
class AbstractTab(QWidget):
"""An adapter for QWebView/QWebEngineView representing a single tab."""
#: Signal emitted when a website requests to close this tab.
window_close_requested = pyqtSignal()
#: Signal emitted when a link is hovered (the hover text)
link_hovered = pyqtSignal(str)
#: Signal emitted when a page started loading
load_started = pyqtSignal()
#: Signal emitted when a page is loading (progress percentage)
load_progress = pyqtSignal(int)
#: Signal emitted when a page finished loading (success as bool)
load_finished = pyqtSignal(bool)
#: Signal emitted when a page's favicon changed (icon as QIcon)
icon_changed = pyqtSignal(QIcon)
#: Signal emitted when a page's title changed (new title as str)
title_changed = pyqtSignal(str)
#: Signal emitted when this tab was pinned/unpinned (new pinned state as bool)
pinned_changed = pyqtSignal(bool)
#: Signal emitted when a new tab should be opened (url as QUrl)
new_tab_requested = pyqtSignal(QUrl)
#: Signal emitted when a page's URL changed (url as QUrl)
url_changed = pyqtSignal(QUrl)
#: Signal emitted when a tab's content size changed
#: (new size as QSizeF)
contents_size_changed = pyqtSignal(QSizeF)
#: Signal emitted when a page requested full-screen (bool)
fullscreen_requested = pyqtSignal(bool)
#: Signal emitted before load starts (URL as QUrl)
before_load_started = pyqtSignal(QUrl)
# Signal emitted when a page's load status changed
# (argument: usertypes.LoadStatus)
load_status_changed = pyqtSignal(usertypes.LoadStatus)
# Signal emitted before shutting down
shutting_down = pyqtSignal()
# Signal emitted when a history item should be added
history_item_triggered = pyqtSignal(QUrl, QUrl, str)
# Signal emitted when the underlying renderer process terminated.
# arg 0: A TerminationStatus member.
# arg 1: The exit code.
renderer_process_terminated = pyqtSignal(TerminationStatus, int)
# Hosts for which a certificate error happened. Shared between all tabs.
#
# Note that we remember hosts here, without scheme/port:
# QtWebEngine/Chromium also only remembers hostnames, and certificates are
# for a given hostname anyways.
_insecure_hosts: Set[str] = set()
def __init__(self, *, win_id: int,
mode_manager: modeman.ModeManager,
private: bool,
parent: QWidget = None) -> None:
utils.unused(mode_manager) # needed for mypy
self.is_private = private
self.win_id = win_id
self.tab_id = next(tab_id_gen)
super().__init__(parent)
self.registry = objreg.ObjectRegistry()
tab_registry = objreg.get('tab-registry', scope='window',
window=win_id)
tab_registry[self.tab_id] = self
objreg.register('tab', self, registry=self.registry)
self.data = TabData()
self._layout = miscwidgets.WrapperLayout(self)
self._widget = cast(QWidget, None)
self._progress = 0
self._load_status = usertypes.LoadStatus.none
self._tab_event_filter = eventfilter.TabEventFilter(
self, parent=self)
self.backend: Optional[usertypes.Backend] = None
# If true, this tab has been requested to be removed (or is removed).
self.pending_removal = False
self.shutting_down.connect(functools.partial(
setattr, self, 'pending_removal', True))
self.before_load_started.connect(self._on_before_load_started)
def _set_widget(self, widget: QWidget) -> None:
# pylint: disable=protected-access
self._widget = widget
self.data.splitter = miscwidgets.InspectorSplitter(
win_id=self.win_id, main_webview=widget)
self._layout.wrap(self, self.data.splitter)
self.history._history = widget.history()
self.history.private_api._history = widget.history()
self.scroller._init_widget(widget)
self.caret._widget = widget
self.zoom._widget = widget
self.search._widget = widget
self.printing._widget = widget
self.action._widget = widget
self.elements._widget = widget
self.audio._widget = widget
self.private_api._widget = widget
self.settings._settings = widget.settings()
self._install_event_filter()
self.zoom.apply_default()
def _install_event_filter(self) -> None:
raise NotImplementedError
def _set_load_status(self, val: usertypes.LoadStatus) -> None:
"""Setter for load_status."""
if not isinstance(val, usertypes.LoadStatus):
raise TypeError("Type {} is no LoadStatus member!".format(val))
log.webview.debug("load status for {}: {}".format(repr(self), val))
self._load_status = val
self.load_status_changed.emit(val)
def send_event(self, evt: QEvent) -> None:
"""Send the given event to the underlying widget.
The event will be sent via QApplication.postEvent.
Note that a posted event must not be re-used in any way!
"""
# This only gives us some mild protection against re-using events, but
# it's certainly better than a segfault.
if getattr(evt, 'posted', False):
raise utils.Unreachable("Can't re-use an event which was already "
"posted!")
recipient = self.private_api.event_target()
if recipient is None:
# https://github.com/qutebrowser/qutebrowser/issues/3888
log.webview.warning("Unable to find event target!")
return
evt.posted = True # type: ignore[attr-defined]
QApplication.postEvent(recipient, evt)
def navigation_blocked(self) -> bool:
"""Test if navigation is allowed on the current tab."""
return self.data.pinned and config.val.tabs.pinned.frozen
@pyqtSlot(QUrl)
def _on_before_load_started(self, url: QUrl) -> None:
"""Adjust the title if we are going to visit a URL soon."""
qtutils.ensure_valid(url)
url_string = url.toDisplayString()
log.webview.debug("Going to start loading: {}".format(url_string))
self.title_changed.emit(url_string)
@pyqtSlot(QUrl)
def _on_url_changed(self, url: QUrl) -> None:
"""Update title when URL has changed and no title is available."""
if url.isValid() and not self.title():
self.title_changed.emit(url.toDisplayString())
self.url_changed.emit(url)
@pyqtSlot()
def _on_load_started(self) -> None:
self._progress = 0
self.data.viewing_source = False
self._set_load_status(usertypes.LoadStatus.loading)
self.load_started.emit()
@pyqtSlot(usertypes.NavigationRequest)
def _on_navigation_request(
self,
navigation: usertypes.NavigationRequest
) -> None:
"""Handle common acceptNavigationRequest code."""
url = utils.elide(navigation.url.toDisplayString(), 100)
log.webview.debug("navigation request: url {}, type {}, is_main_frame "
"{}".format(url,
navigation.navigation_type,
navigation.is_main_frame))
if navigation.is_main_frame:
self.data.last_navigation = navigation
if not navigation.url.isValid():
if navigation.navigation_type == navigation.Type.link_clicked:
msg = urlutils.get_errstring(navigation.url,
"Invalid link clicked")
message.error(msg)
self.data.open_target = usertypes.ClickTarget.normal
log.webview.debug("Ignoring invalid URL {} in "
"acceptNavigationRequest: {}".format(
navigation.url.toDisplayString(),
navigation.url.errorString()))
navigation.accepted = False
@pyqtSlot(bool)
def _on_load_finished(self, ok: bool) -> None:
assert self._widget is not None
if sip.isdeleted(self._widget):
# https://github.com/qutebrowser/qutebrowser/issues/3498
return
if sessions.session_manager is not None:
sessions.session_manager.save_autosave()
self.load_finished.emit(ok)
if not self.title():
self.title_changed.emit(self.url().toDisplayString())
self.zoom.reapply()
def _update_load_status(self, ok: bool) -> None:
"""Update the load status after a page finished loading.
Needs to be called by subclasses to trigger a load status update, e.g.
as a response to a loadFinished signal.
"""
url = self.url()
is_https = url.scheme() == 'https'
if not ok:
loadstatus = usertypes.LoadStatus.error
elif is_https and url.host() in self._insecure_hosts:
loadstatus = usertypes.LoadStatus.warn
elif is_https:
loadstatus = usertypes.LoadStatus.success_https
else:
loadstatus = usertypes.LoadStatus.success
self._set_load_status(loadstatus)
@pyqtSlot()
def _on_history_trigger(self) -> None:
"""Emit history_item_triggered based on backend-specific signal."""
raise NotImplementedError
@pyqtSlot(int)
def _on_load_progress(self, perc: int) -> None:
self._progress = perc
self.load_progress.emit(perc)
def url(self, *, requested: bool = False) -> QUrl:
raise NotImplementedError
def progress(self) -> int:
return self._progress
def load_status(self) -> usertypes.LoadStatus:
return self._load_status
def _load_url_prepare(self, url: QUrl) -> None:
qtutils.ensure_valid(url)
self.before_load_started.emit(url)
def load_url(self, url: QUrl) -> None:
raise NotImplementedError
def reload(self, *, force: bool = False) -> None:
raise NotImplementedError
def stop(self) -> None:
raise NotImplementedError
def fake_key_press(self,
key: Qt.Key,
modifier: Qt.KeyboardModifier = Qt.NoModifier) -> None:
"""Send a fake key event to this tab."""
press_evt = QKeyEvent(QEvent.KeyPress, key, modifier, 0, 0, 0)
release_evt = QKeyEvent(QEvent.KeyRelease, key, modifier,
0, 0, 0)
self.send_event(press_evt)
self.send_event(release_evt)
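    # Hedged usage sketch (added; not in the original file): a fake Escape
    # press would be sent as
    #     tab.fake_key_press(Qt.Key_Escape)
    # which posts matching KeyPress/KeyRelease events to the backend widget.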
def dump_async(self,
callback: Callable[[str], None], *,
plain: bool = False) -> None:
"""Dump the current page's html asynchronously.
The given callback will be called with the result when dumping is
complete.
"""
raise NotImplementedError
def run_js_async(
self,
code: str,
callback: Callable[[Any], None] = None, *,
world: Union[usertypes.JsWorld, int] = None
) -> None:
"""Run javascript async.
The given callback will be called with the result when running JS is
complete.
Args:
code: The javascript code to run.
callback: The callback to call with the result, or None.
world: A world ID (int or usertypes.JsWorld member) to run the JS
in the main world or in another isolated world.
"""
raise NotImplementedError
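    # Hedged usage sketch (added; not in the original file): backend subclasses
    # are typically called along the lines of
    #     tab.run_js_async('document.title',
    #                      callback=lambda result: print(result),
    #                      world=usertypes.JsWorld.main)
    # where the callback receives whatever the JavaScript snippet evaluated to.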
def title(self) -> str:
raise NotImplementedError
def icon(self) -> None:
raise NotImplementedError
def set_html(self, html: str, base_url: QUrl = QUrl()) -> None:
raise NotImplementedError
def set_pinned(self, pinned: bool) -> None:
self.data.pinned = pinned
self.pinned_changed.emit(pinned)
def __repr__(self) -> str:
try:
qurl = self.url()
url = qurl.toDisplayString(
QUrl.EncodeUnicode) # type: ignore[arg-type]
except (AttributeError, RuntimeError) as exc:
url = '<{}>'.format(exc.__class__.__name__)
else:
url = utils.elide(url, 100)
return utils.get_repr(self, tab_id=self.tab_id, url=url)
def is_deleted(self) -> bool:
assert self._widget is not None
return sip.isdeleted(self._widget)
|
The-Compiler/qutebrowser
|
qutebrowser/browser/browsertab.py
|
Python
|
gpl-3.0
| 41,845
|
[
"VisIt"
] |
151429e823b56774e6dc191d54acaf6263a78649556908b570365a1cca207924
|
# $HeadURL: $
""" PEP
PEP ( Policy Enforcement Point ) is the front-end of the whole Policy System.
Any interaction with it must go through the PEP to ensure a smooth flow.
Firstly, it loads the PDP ( Policy Decision Point ) which actually is the
module doing all dirty work ( finding policies, running them, merging their
results, etc... ). Indeed, the PEP takes the output of the PDP for a given set
of parameters ( decissionParams ) and enforces the actions that apply ( also
determined by the PDP output ).
"""
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.ResourceStatusSystem.PolicySystem.PDP import PDP
from DIRAC.ResourceStatusSystem.Utilities import Utils
__RCSID__ = '$Id: $'
class PEP:
""" PEP ( Policy Enforcement Point )
"""
def __init__( self, clients = None ):
""" Constructor
examples:
>>> pep = PEP()
>>> pep1 = PEP( { 'ResourceStatusClient' : ResourceStatusClient() } )
>>> pep2 = PEP( { 'ResourceStatusClient' : ResourceStatusClient(), 'ClientY' : None } )
:Parameters:
**clients** - [ None, `dict` ]
dictionary with clients to be used in the commands issued by the policies.
If not defined, the commands will import them. It is a measure to avoid
opening the same connection every time a policy is evaluated.
"""
if clients is None:
clients = {}
    # PEP internally uses two of the clients: ResourceStatusClient and ResourceManagementClient
if 'ResourceStatusClient' in clients:
self.rsClient = clients[ 'ResourceStatusClient' ]
else:
self.rsClient = ResourceStatusClient()
if 'ResourceManagementClient' in clients:
self.rmClient = clients[ 'ResourceManagementClient' ]
else:
self.rmClient = ResourceManagementClient()
self.clients = clients
# Pass to the PDP the clients that are going to be used on the Commands
self.pdp = PDP( clients )
def enforce( self, decisionParams ):
""" Given a dictionary with decisionParams, it is passed to the PDP, which
    will return ( in case there are one or more positive matches ) a dictionary containing
three key-pair values: the original decisionParams ( `decisionParams` ), all
the policies evaluated ( `singlePolicyResults` ) and the computed final result
( `policyCombinedResult` ).
To know more about decisionParams, please read PDP.setup where the decisionParams
are sanitized.
examples:
>>> pep.enforce( { 'element' : 'Site', 'name' : 'MySite' } )
>>> pep.enforce( { 'element' : 'Resource', 'name' : 'myce.domain.ch' } )
:Parameters:
**decisionParams** - `dict`
dictionary with the parameters that will be used to match policies.
"""
# Setup PDP with new parameters dictionary
self.pdp.setup( decisionParams )
# Run policies, get decision, get actions to apply
resDecisions = self.pdp.takeDecision()
if not resDecisions[ 'OK' ]:
gLogger.error( 'PEP: Something went wrong, not enforcing policies for %s' % decisionParams )
return resDecisions
resDecisions = resDecisions[ 'Value' ]
# We take from PDP the decision parameters used to find the policies
decisionParams = resDecisions[ 'decissionParams' ]
policyCombinedResult = resDecisions[ 'policyCombinedResult' ]
singlePolicyResults = resDecisions[ 'singlePolicyResults' ]
    # We have run the policies and at this point, we are about to execute the actions.
# One more final check before proceeding
isNotUpdated = self.__isNotUpdated( decisionParams )
if not isNotUpdated[ 'OK' ]:
return isNotUpdated
for policyActionName, policyActionType in policyCombinedResult[ 'PolicyAction' ]:
try:
actionMod = Utils.voimport( 'DIRAC.ResourceStatusSystem.PolicySystem.Actions.%s' % policyActionType )
except ImportError:
gLogger.error( 'Error importing %s action' % policyActionType )
continue
try:
action = getattr( actionMod, policyActionType )
except AttributeError:
gLogger.error( 'Error importing %s action class' % policyActionType )
continue
actionObj = action( policyActionName, decisionParams, policyCombinedResult,
singlePolicyResults, self.clients )
gLogger.debug( ( policyActionName, policyActionType ) )
actionResult = actionObj.run()
if not actionResult[ 'OK' ]:
gLogger.error( actionResult[ 'Message' ] )
return S_OK( resDecisions )
def __isNotUpdated( self, decisionParams ):
""" Checks for the existence of the element as it was passed to the PEP. It may
happen that while being the element processed by the PEP an user through the
web interface or the CLI has updated the status for this particular element. As
a result, the PEP would overwrite whatever the user had set. This check is not
perfect, as still an user action can happen while executing the actions, but
the probability is close to 0. However, if there is an action that takes seconds
    to be executed, this must be re-evaluated.
:Parameters:
**decisionParams** - `dict`
dictionary with the parameters that will be used to match policies
:return: S_OK / S_ERROR
"""
# Copy original dictionary and get rid of one key we cannot pass as kwarg
selectParams = decisionParams.copy()
del selectParams[ 'element' ]
del selectParams[ 'active' ]
# We expect to have an exact match. If not, then something has changed and
# we cannot proceed with the actions.
unchangedRow = self.rsClient.selectStatusElement( decisionParams[ 'element' ],
'Status', **selectParams )
if not unchangedRow[ 'OK' ]:
return unchangedRow
if not unchangedRow[ 'Value' ]:
msg = '%(name)s ( %(status)s / %(statusType)s ) has been updated after PEP started running'
return S_ERROR( msg % selectParams )
return S_OK()
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
Sbalbp/DIRAC
|
ResourceStatusSystem/PolicySystem/PEP.py
|
Python
|
gpl-3.0
| 6,634
|
[
"DIRAC"
] |
a6557f7d178ab17404a9c1bf7a3745be5e07b8edec7fc0e8dfdbca3faf3fac96
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright INRIA
# Contributors: Nicolas P. Rougier (Nicolas.Rougier@inria.fr)
#
# DANA is a computing framework for the simulation of distributed,
# asynchronous, numerical and adaptive models.
#
# This software is governed by the CeCILL license under French law and abiding
# by the rules of distribution of free software. You can use, modify and/ or
# redistribute the software under the terms of the CeCILL license as circulated
# by CEA, CNRS and INRIA at the following URL
# http://www.cecill.info/index.en.html.
#
# As a counterpart to the access to the source code and rights to copy, modify
# and redistribute granted by the license, users are provided only with a
# limited warranty and the software's author, the holder of the economic
# rights, and the successive licensors have only limited liability.
#
# In this respect, the user's attention is drawn to the risks associated with
# loading, using, modifying and/or developing or reproducing the software by
# the user in light of its specific status of free software, that may mean that
# it is complicated to manipulate, and that also therefore means that it is
# reserved for developers and experienced professionals having in-depth
# computer knowledge. Users are therefore encouraged to load and test the
# software's suitability as regards their requirements in conditions enabling
# the security of their systems and/or data to be ensured and, more generally,
# to use and operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
# -----------------------------------------------------------------------------
'''
A Distributed Computational Model of Spatial Memory Anticipation During a
Visual Search Task. Anticipatory Behavior in Adaptive Learning Systems: From
Brains to Individual and Social Behavior, Springer - LNAI 4520, M.V. Butz and
O. Sigaud and G. Baldassarre and G. Pezzulo . (2006)
This script implements the model presented in [1]_, which performs a
sequential search task with saccadic eye movements.
References
----------
_[1] http://dx.doi.org/10.1007/978-3-540-74262-3_10
'''
from dana import *
from display import *
# SigmaPiConnection object
# ------------------------
class SigmaPiConnection(Connection):
def __init__(self, source=None, modulator=None, target=None, scale=1.0, direction=1):
Connection.__init__(self, source, target)
self._scale = scale
self._direction = +1
if direction < 0:
self._direction = -1
names = modulator.dtype.names
        if names is None:
self._actual_modulator = modulator
else:
self._actual_modulator = (modulator[names[0]])
def output(self):
src = self._actual_source
mod = self._actual_modulator
tgt = self._actual_target
R = np.zeros(tgt.shape)
if len(tgt.shape) == len(src.shape) == len(mod.shape) == 1:
R = convolve1d(src,mod[::self._direction])
elif len(tgt.shape) == len(src.shape) == len(mod.shape) == 2:
R = convolve2d(src,mod[::self._direction,::self._direction])
else:
            raise NotImplementedError
return R*self._scale
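# Added note (illustrative; not part of the original model code): the sigma-pi
# connection above realizes a multiplicative interaction by convolving the
# source activity with the (possibly flipped) modulator activity, e.g. in 1-D
#     output = convolve1d(source_V, modulator_V[::direction]) * scale
# so the modulator acts as a data-dependent kernel instead of a fixed weight.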
# Simulation parameters
# ---------------------
n = 40
dt = 0.5
stimuli_size = 0.13
noise_level = 0.05
# Build groups
# ------------
visual = np.zeros((n,n))
tau_f = 1.0/3.0
focus = Group((n,n), '''dU/dt = tau_f*(-U + Ii + Iwm + L - 0.05) : float
V = minimum(maximum(U,0),1) : float
Ii: float; Iwm: float; L: float''')
tau_w = 1.0/1.5
wm = Group((n,n), '''dU/dt = tau_w*(-U + L + Ii + If + Ia + It -0.25) : float
V = minimum(maximum(U,0),1) : float
Ii: float; If: float; Ia: float; It: float; L: float''')
thal_wm = Group((n,n), '''dU/dt = (-U + L + I) : float
V = minimum(maximum(U,0),1) : float
I : float; L : float''')
anticipation = Group((n,n), '''dU/dt = (-U + I) : float
V = minimum(maximum(U,0),1) : float
I : float; L : float''')
# Connections
# -----------
s = (2*n+1,2*n+1)
K = 0.13*gaussian(s,2.83/n)-0.046*gaussian(s, 17.68/n); K[n,n] = 0.0
SharedConnection(visual, focus('Ii'), +0.018*gaussian(s, 1.42/n))
SharedConnection(focus('V'), focus('L'), K)
SharedConnection(wm('V'), focus('Iwm'), -0.005*gaussian(s, 4.24/n))
K = 0.185*gaussian(s, 1.77/n)-0.11*gaussian(s, 2.83/n); K[n,n] = 0.0
SharedConnection(visual, wm('Ii'), +0.021*gaussian(s, 1.42/n) )
SharedConnection(focus('V'), wm('If'), +0.023*gaussian(s, 1.42/n) )
SharedConnection(thal_wm('V'), wm('It'), +0.195*gaussian(s, 1.06/n) )
SharedConnection(anticipation('V'), wm('Ia'), +0.023*gaussian(s, 1.42/n) )
SharedConnection(wm('V'), wm('L'), K)
SharedConnection(wm('V'), thal_wm('I'), 0.195*gaussian(s, 1.06/n) )
SigmaPiConnection(wm('V'), focus('V'), anticipation('I'),scale=0.05)
K = 0.4*gaussian(s, 2.12/n) - 0.2*gaussian(s, 2.83/n); K[n,n] = 0.0
SharedConnection(anticipation('V'), anticipation('L'), K)
def evaluate(epochs):
global visual, stimuli, stimuli_size
for i in range(epochs):
encode(visual, stimuli, stimuli_size)
run(time=dt,dt=dt)
update()
plt.draw()
def decode(Z):
s = Z.sum()
if s == 0:
return 0, 0
xmin, xmax = -1.0, 1.0
ymin, ymax = -1.0, 1.0
x = (Z.sum(axis=0)*np.linspace(xmin,xmax,Z.shape[1])).sum()
y = (Z.sum(axis=1)*np.linspace(ymin,ymax,Z.shape[0]).T).sum()
return y/s, x/s
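# Added note (illustrative; not part of the original script): decode() returns
# the centre of mass of an activity map in normalised [-1, 1] coordinates
# (row first, then column), encode() is its approximate inverse for a set of
# point stimuli, and make_saccade() below uses the decoded focus position to
# re-centre the stimuli on the currently attended location.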
def encode(Z, stimuli, size):
Z[...] = 0
for i in range(stimuli.shape[0]):
Z[...] += gaussian(Z.shape, size, (stimuli[i][0],stimuli[i][1]))
Z += np.random.uniform(-noise_level, noise_level, Z.shape)
    Z[...] = np.maximum(np.minimum(Z, 1), 0)  # clip in place so the caller's array stays in [0, 1]
def make_saccade(command, output, stimuli, size):
stimuli -= decode(command)
encode(output, stimuli, size)
def demo():
global focus, visual, stimuli, stimuli_size
for i in range(3):
evaluate(100)
make_saccade(focus['V'], visual, stimuli, stimuli_size)
evaluate(100)
# Set visual
# ----------
stimuli = np.array([[0.5*np.sin(0.0*np.pi/3.0), 0.5*np.cos(0.0*np.pi/3.0)],
[0.5*np.sin(2.0*np.pi/3.0), 0.5*np.cos(2.0*np.pi/3.0)],
[0.5*np.sin(4.0*np.pi/3.0), 0.5*np.cos(4.0*np.pi/3.0)]])
# Visualization
# -------------
plt.ion()
fig = plt.figure(figsize=(8,10),facecolor='white')
plot(plt.subplot(321), thal_wm('V'), 'Thal_Wm')
plot(plt.subplot(322), anticipation('V'), 'Anticipation')
plot(plt.subplot(323), wm('V'), 'Wm')
plot(plt.subplot(324), focus('V'), 'Focus')
plot(plt.subplot(325), visual, 'Visual')
plt.connect('button_press_event', button_press_event)
plt.draw()
demo()
plt.show()
|
rougier/dana
|
examples/overt-attention.py
|
Python
|
bsd-3-clause
| 7,064
|
[
"Gaussian"
] |
48cf2e0ecf93c493ce6b75edaea1a469324f34e19b2ce205ea57d29990782354
|
"""
Gaussian Process Factor Analysis (GPFA) plots
---------------------------------------------
Visualizes transformed trajectories output from
:class:`elephant.gpfa.gpfa.GPFA`
.. autosummary::
:toctree: toctree/gpfa/
plot_dimensions_vs_time
plot_trajectories
plot_trajectories_spikeplay
plot_cumulative_shared_covariance
plot_transform_matrix
"""
# Copyright 2017-2022 by the Viziphant team, see `doc/authors.rst`.
# License: Modified BSD, see LICENSE.txt for details.
import itertools
import math
import matplotlib.pyplot as plt
import neo
import numpy as np
import matplotlib.animation as animation
from matplotlib.lines import Line2D
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.axes_grid1 import make_axes_locatable
from elephant.conversion import BinnedSpikeTrain
from elephant.utils import check_neo_consistency
def plot_cumulative_shared_covariance(loading_matrix):
"""
    This function plots the cumulative shared covariance. It helps to
    visually identify an appropriate number of dimensions: one that is small,
    yet explains a substantial part of the variance in the data.
Parameters
----------
loading_matrix : np.ndarray
The loading matrix defines the mapping between neural space and
latent state space. It is obtained by fitting a GPFA model and
stored in ``GPFA.params_estimated['C']`` or if orthonormalized
``GPFA.params_estimated['Corth']``.
Returns
-------
fig : matplotlib.figure.Figure
axes : matplotlib.axes.Axes
"""
eigenvalues = np.linalg.eigvals(np.dot(loading_matrix.transpose(),
loading_matrix))
total_variance = np.sum(eigenvalues)
# sort by decreasing variance explained
sorted_eigenvalues = np.sort(np.abs(eigenvalues))[-1::-1]
cumulative_variance = np.cumsum(sorted_eigenvalues / total_variance)
fig, axes = plt.subplots()
axes.plot(cumulative_variance, 'o-')
axes.grid()
axes.set_title('Eigenspectrum of estimated shared covariance matrix')
axes.set_xlabel('Latent Dimensionality')
axes.set_ylabel('Cumulative % of shared variance explained')
return fig, axes
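# Hedged usage sketch (added; mirrors the example style used elsewhere in this
# module): after fitting a GPFA model, the eigenspectrum of the loading matrix
# could be inspected roughly like
#     gpfa = GPFA(bin_size=20 * pq.ms, x_dim=8)
#     gpfa.fit(spiketrains_per_trial)
#     fig, axes = plot_cumulative_shared_covariance(
#         gpfa.params_estimated['C'])
# where 'C' (or 'Corth' for the orthonormalized loadings) is described above;
# the bin size and x_dim used here are assumed placeholder values.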
def plot_transform_matrix(loading_matrix, cmap='RdYlGn'):
"""
This function visualizes the loading matrix as a heatmap.
Parameters
----------
loading_matrix : np.ndarray
The loading matrix defines the mapping between neural space and
latent state space. It is obtained by fitting a GPFA model and
stored in ``GPFA.params_estimated['C']`` or if orthonormalized
``GPFA.params_estimated['Corth']``.
cmap : str, optional
Matplotlib imshow colormap.
Default: 'RdYlGn'
Returns
-------
fig : matplotlib.figure.Figure
axes : matplotlib.axes.Axes
"""
fig, axes = plt.subplots()
vmax = np.max(np.abs(loading_matrix))
vmin = -vmax
heatmap = axes.imshow(loading_matrix,
vmin=vmin, vmax=vmax,
aspect='auto',
interpolation='none', cmap=cmap)
axes.set_title('Loading Matrix')
axes.set_ylabel('Neuron ID')
axes.set_xlabel('Latent Variable')
divider = make_axes_locatable(axes)
cax = divider.append_axes("right", size="5%", pad=0.1)
colorbar = plt.colorbar(heatmap, cax=cax)
colorbar.set_label('Latent Variable Weight')
return fig, axes
def plot_dimensions_vs_time(returned_data,
gpfa_instance,
dimensions='all',
orthonormalized_dimensions=True,
n_trials_to_plot=20,
trial_grouping_dict=None,
colors='grey',
plot_single_trajectories=True,
plot_group_averages=False,
n_columns=2,
plot_args_single={'linewidth': 0.3,
'alpha': 0.4,
'linestyle': '-'},
plot_args_average={'linewidth': 2,
'alpha': 1,
'linestyle': 'dashdot'},
figure_args=dict(figsize=(10, 10))):
"""
This function plots all latent space state dimensions versus time.
It is a wrapper for the function plot_single_dimension_vs_time and
places the single plot onto a grid.
Optional visual aids are offered such as grouping the trials and color
coding their traces.
Changes to optics of the plot can be applied by providing respective
dictionaries.
This function is an adaption of the MATLAB implementation
by Byron Yu which was published with his paper:
Yu et al., J Neurophysiol, 2009.
Parameters
----------
returned_data : np.ndarray or dict
When the length of `returned_data` is one, a single np.ndarray,
containing the requested data (the first entry in `returned_data`
keys list), is returned. Otherwise, a dict of multiple np.ndarrays
with the keys identical to the data names in `returned_data` is
returned.
N-th entry of each np.ndarray is a np.ndarray of the following
shape, specific to each data type, containing the corresponding
data for the n-th trial:
`latent_variable_orth`: (#latent_vars, #bins) np.ndarray
`latent_variable`: (#latent_vars, #bins) np.ndarray
`y`: (#units, #bins) np.ndarray
`Vsm`: (#latent_vars, #latent_vars, #bins) np.ndarray
`VsmGP`: (#bins, #bins, #latent_vars) np.ndarray
Note that the num. of bins (#bins) can vary across trials,
reflecting the trial durations in the given `spiketrains` data.
gpfa_instance : GPFA
Instance of the GPFA() class in elephant, which was used to obtain
`returned_data`.
dimensions : 'all' or int or list of int, optional
Dimensions to plot.
Default: 'all'
orthonormalized_dimensions : bool, optional
Boolean which specifies whether to plot the orthonormalized latent
state space dimension corresponding to the entry 'latent_variable_orth'
in returned data (True) or the unconstrained dimension corresponding
to the entry 'latent_variable' (False).
Beware that the unconstrained state space dimensions 'latent_variable'
are not ordered by their explained variance. These dimensions each
represent one Gaussian process timescale $\tau$.
On the contrary, the orthonormalized dimensions 'latent_variable_orth'
are ordered by decreasing explained variance, allowing a similar
intuitive interpretation to the dimensions obtained in a PCA. Due to
the orthonormalization, these dimensions reflect mixtures of
timescales.
Default: True
n_trials_to_plot : int, optional
Number of single trial trajectories to plot.
Default: 20
trial_grouping_dict : dict or None
Dictionary which specifies the groups of trials which belong together
(e.g. due to same trial type). Each item specifies one group: its
key defines the group name (which appears in the legend) and the
corresponding value is a list or np.ndarray of trial IDs.
Default: None
colors : str or list of str, optional
List of strings specifying the colors of the different trial groups.
The length of this list should correspond to the number of items
in trial_grouping_dict. In case a string is given, all trials will
share the same color unless `trial_grouping_dict` is specified, in
which case colors will be set automatically to correspond to individual
groups.
Default: 'grey'
plot_single_trajectories : bool, optional
If True, single trial trajectories are plotted.
Default: True
plot_group_averages : bool, optional
If True, trajectories of those trials belonging together specified
in the trial_grouping_dict are averaged and plotted.
Default: False
n_columns : int, optional
Number of columns of the grid onto which the single plots are placed.
The number of rows are deduced from the number of dimensions
to be plotted.
Default: 2
plot_args_single : dict, optional
Arguments dictionary passed to ax.plot() of the single trajectories.
plot_args_average : dict, optional
Arguments dictionary passed to ax.plot() of the average trajectories.
figure_args : dict, optional
Arguments dictionary passed to matplotlib.pyplot.figure(),
if ax is None.
Returns
-------
fig : matplotlib.figure.Figure
axes : matplotlib.axes.Axes
Examples
--------
    In the following example, we calculate the neural trajectories of 5
    independent Poisson spike trains recorded in 10 trials with randomized
rates up to 100 Hz and plot the resulting orthonormalized latent state
space dimensions.
.. plot::
:include-source:
import numpy as np
import quantities as pq
from elephant.gpfa import GPFA
from elephant.spike_train_generation import homogeneous_poisson_process
from viziphant.gpfa import plot_dimensions_vs_time
np.random.seed(24)
n_trials = 10
n_channels = 5
data = []
for trial in range(n_trials):
firing_rates = np.random.randint(low=1, high=100,
size=n_channels) * pq.Hz
spike_times = [homogeneous_poisson_process(rate=rate)
for rate in firing_rates]
data.append(spike_times)
gpfa = GPFA(bin_size=20 * pq.ms, x_dim=3, verbose=False)
gpfa.fit(data)
results = gpfa.transform(data, returned_data=['latent_variable_orth',
'latent_variable'])
plot_dimensions_vs_time(
returned_data=results,
gpfa_instance=gpfa,
dimensions=[0, 2],
orthonormalized_dimensions=True,
n_columns=1)
plt.show()
"""
if dimensions == 'all':
dimensions = list(range(gpfa_instance.x_dim))
elif isinstance(dimensions, int):
dimensions = [dimensions]
n_columns = min(n_columns, len(dimensions))
# deduce n_rows from n_columns
n_rows = math.ceil(len(dimensions) / n_columns)
fig, axes = plt.subplots(nrows=n_rows, ncols=n_columns,
sharex=True, sharey=True, **figure_args)
axes = np.atleast_2d(axes)
if axes.shape[0] == 1:
# (1, n) -> (n, 1)
axes = axes.T
data = _check_input_data(returned_data, orthonormalized_dimensions)
if trial_grouping_dict is None:
trial_grouping_dict = {}
colors = _check_colors(colors, trial_grouping_dict, n_trials=data.shape[0])
n_trials = data.shape[0]
bin_size = gpfa_instance.bin_size.item()
for dimension_index, axis in zip(dimensions, np.ravel(axes)):
if plot_single_trajectories:
for trial_idx in range(min(n_trials, n_trials_to_plot)):
dat = data[trial_idx]
key_id, trial_type = _get_trial_type(trial_grouping_dict,
trial_idx)
# plot single trial trajectories
times = np.arange(1, dat.shape[1] + 1) * bin_size
axis.plot(times,
dat[dimension_index, :],
color=colors[key_id],
label=trial_type,
**plot_args_single)
if plot_group_averages:
for color, trial_type in zip(colors, trial_grouping_dict.keys()):
group_average = data[trial_grouping_dict[trial_type]].mean()
times = np.arange(1, group_average.shape[1] + 1) * bin_size
axis.plot(times,
group_average[dimension_index],
color=color,
label=trial_type,
**plot_args_average)
_set_title_dimensions_vs_time(
ax=axis,
latent_variable_idx=dimension_index,
orthonormalized_dimensions=orthonormalized_dimensions,
data=data,
gpfa_instance=gpfa_instance)
_show_unique_legend(axes=axes[0, 0])
plt.tight_layout()
for axis in axes[-1, :]:
axis.set_xlabel(f'Time ({gpfa_instance.bin_size.dimensionality})')
return fig, axes
def plot_trajectories(returned_data,
gpfa_instance,
dimensions=[0, 1],
block_with_cut_trials=None,
neo_event_name=None,
relevant_events=None,
orthonormalized_dimensions=True,
n_trials_to_plot=20,
trial_grouping_dict=None,
colors='grey',
plot_group_averages=False,
plot_args_single={'linewidth': 0.3,
'alpha': 0.4,
'linestyle': '-'},
plot_args_marker={'alpha': 0.4,
'markersize': 5},
plot_args_average={'linewidth': 2,
'alpha': 1,
'linestyle': 'dashdot'},
plot_args_marker_start={'marker': 'p',
'markersize': 10,
'label': 'start'},
figure_kwargs=dict()):
"""
This function allows for 2D and 3D visualization of the latent space
variables identified by the GPFA.
Optional visual aids are offered such as grouping the trials and color
coding their traces.
Changes to optics of the plot can be applied by providing respective
dictionaries.
    This function is an adaptation of the MATLAB implementation
by Byron Yu which was published with his paper:
Yu et al., J Neurophysiol, 2009.
Parameters
----------
returned_data : np.ndarray or dict
When the length of `returned_data` is one, a single np.ndarray,
containing the requested data (the first entry in `returned_data`
keys list), is returned. Otherwise, a dict of multiple np.ndarrays
with the keys identical to the data names in `returned_data` is
returned.
N-th entry of each np.ndarray is a np.ndarray of the following
shape, specific to each data type, containing the corresponding
data for the n-th trial:
`latent_variable_orth`: (#latent_vars, #bins) np.ndarray
`latent_variable`: (#latent_vars, #bins) np.ndarray
`y`: (#units, #bins) np.ndarray
`Vsm`: (#latent_vars, #latent_vars, #bins) np.ndarray
`VsmGP`: (#bins, #bins, #latent_vars) np.ndarray
Note that the num. of bins (#bins) can vary across trials,
reflecting the trial durations in the given `spiketrains` data.
gpfa_instance : GPFA
Instance of the GPFA() class in elephant, which was used to obtain
returned_data.
dimensions : list of int, optional
List specifying the indices of the dimensions to use for the
2D or 3D plot.
Default: [0, 1]
block_with_cut_trials : neo.Block or None, optional
The neo.Block should contain each single trial as a separate
neo.Segment including the neo.Event with a specified
`neo_event_name`.
Default: None
neo_event_name : str or None, optional
A string specifying the name of the neo.Event which should be used
to identify the event times and labels of the `relevant_events`.
Default: None
relevant_events : list of str or None, optional
List of names of the event labels that should be plotted onto each
single trial trajectory.
Default: None
orthonormalized_dimensions : bool, optional
Boolean which specifies whether to plot the orthonormalized latent
state space dimension corresponding to the entry 'latent_variable_orth'
in returned data (True) or the unconstrained dimension corresponding
to the entry 'latent_variable' (False).
Beware that the unconstrained state space dimensions 'latent_variable'
are not ordered by their explained variance. These dimensions each
represent one Gaussian process timescale $\tau$.
On the contrary, the orthonormalized dimensions 'latent_variable_orth'
are ordered by decreasing explained variance, allowing a similar
intuitive interpretation to the dimensions obtained in a PCA. Due to
the orthonormalization, these dimensions reflect mixtures of
timescales.
Default: True
n_trials_to_plot : int, optional
Number of single trial trajectories to plot. If zero, no single trial
trajectories will be shown.
Default: 20
trial_grouping_dict : dict or None, optional
Dictionary which specifies the groups of trials which belong together
(e.g. due to same trial type). Each item specifies one group: its
key defines the group name (which appears in the legend) and the
corresponding value is a list or np.ndarray of trial IDs.
Default: None
colors : str or list of str, optional
List of strings specifying the colors of the different trial groups.
The length of this list should correspond to the number of items
in trial_grouping_dict. In case a string is given, all trials will
share the same color unless `trial_grouping_dict` is specified, in
which case colors will be set automatically to correspond to individual
groups.
Default: 'grey'
plot_group_averages : bool, optional
If True, trajectories of those trials belonging together specified
in the trial_grouping_dict are averaged and plotted.
Default: False
plot_args_single : dict, optional
Arguments dictionary passed to ax.plot() of the single trajectories.
plot_args_marker : dict, optional
Arguments dictionary passed to ax.plot() for the single trial events.
plot_args_average : dict, optional
Arguments dictionary passed to ax.plot() of the average trajectories.
plot_args_marker_start : dict, optional
Arguments dictionary passed to ax.plot() for the marker of the
average trajectory start.
figure_kwargs : dict, optional
Arguments dictionary passed to ``plt.figure()``.
Default: {}
Returns
-------
fig : matplotlib.figure.Figure
axes : matplotlib.axes.Axes
Examples
--------
In the following example, we calculate the neural trajectories of 20
independent Poisson spike trains recorded in 50 trials with randomized
rates up to 100 Hz and plot the resulting orthonormalized latent state
space dimensions.
>>> import numpy as np
>>> import quantities as pq
>>> from elephant.gpfa import GPFA
>>> from elephant.spike_train_generation import homogeneous_poisson_process
>>> from viziphant.gpfa import plot_trajectories
>>> data = []
>>> for trial in range(50):
>>> n_channels = 20
>>> firing_rates = np.random.randint(low=1, high=100,
... size=n_channels) * pq.Hz
>>> spike_times = [homogeneous_poisson_process(rate=rate)
... for rate in firing_rates]
>>> data.append(spike_times)
...
>>> gpfa = GPFA(bin_size=20*pq.ms, x_dim=8)
>>> gpfa.fit(data)
>>> results = gpfa.transform(data, returned_data=['latent_variable_orth',
... 'latent_variable'])
>>> trial_id_lists = np.arange(50).reshape(5,10)
>>> trial_group_names = ['A', 'B', 'C', 'D', 'E']
>>> trial_grouping_dict = {}
>>> for trial_group_name, trial_id_list in zip(trial_group_names,
... trial_id_lists):
>>> trial_grouping_dict[trial_group_name] = trial_id_list
...
>>> plot_trajectories(
... results,
... gpfa,
... dimensions=[0,1,2],
... trial_grouping_dict=trial_grouping_dict,
... plot_group_averages=True)
"""
# prepare the input
projection, n_dimensions = _check_dimensions(gpfa_instance, dimensions)
data = _check_input_data(returned_data, orthonormalized_dimensions)
if trial_grouping_dict is None:
trial_grouping_dict = {}
colors = _check_colors(colors, trial_grouping_dict, n_trials=data.shape[0])
# infer n_trial from shape of the data
n_trials = data.shape[0]
# initialize figure and axis
fig = plt.figure(**figure_kwargs)
axes = fig.gca(projection=projection, aspect='auto')
# loop over trials
for trial_idx in range(min(n_trials, n_trials_to_plot)):
dat = data[trial_idx][dimensions, :]
key_id, trial_type = _get_trial_type(trial_grouping_dict,
trial_idx)
color = colors[key_id]
axes.plot(*dat,
color=color,
label=trial_type,
**plot_args_single)
# plot single trial events
if block_with_cut_trials and neo_event_name and relevant_events:
time_bins_with_relevant_event, relevant_event_labels = \
_get_event_times_and_labels(block_with_cut_trials,
trial_idx,
neo_event_name,
relevant_events,
gpfa_instance)
marker = itertools.cycle(Line2D.filled_markers)
for event_time, event_label in zip(
time_bins_with_relevant_event,
relevant_event_labels):
dat_event = [[dat_dim[event_time]] for dat_dim in dat]
axes.plot(*dat_event,
marker=next(marker),
label=event_label,
color=color,
**plot_args_marker)
if plot_group_averages:
for color, trial_type in zip(colors, trial_grouping_dict.keys()):
group_average = data[trial_grouping_dict[trial_type]].mean()
group_average = group_average[dimensions, :]
axes.plot(*group_average,
color=color,
label=trial_type,
**plot_args_average)
axes.plot(*group_average[:, 0],
color=color,
**plot_args_marker_start)
_set_axis_labels_trajectories(axes,
orthonormalized_dimensions,
dimensions)
_show_unique_legend(axes=axes)
plt.tight_layout()
return fig, axes
def plot_trajectories_spikeplay(spiketrains,
returned_data,
gpfa_instance,
dimensions=[0, 1],
speed=0.2,
orthonormalized_dimensions=True,
n_trials_to_plot=20,
trial_grouping_dict=None,
colors='grey',
plot_group_averages=False,
hide_irrelevant_neurons=False,
plot_args_single={'linewidth': 0.3,
'alpha': 0.4,
'linestyle': '-'},
plot_args_average={'linewidth': 2,
'alpha': 1,
'linestyle': 'dashdot'},
plot_args_marker_start={'marker': 'p',
'markersize': 10,
'label': 'start'},
eventplot_kwargs=dict(),
slider_kwargs=dict(),
animation_kwargs=dict(blit=True, repeat=True),
figure_kwargs=dict()):
r"""
This function allows for 2D and 3D visualization of the latent space
variables identified by the GPFA.
Optional visual aids are offered such as grouping the trials and color
coding their traces.
Changes to optics of the plot can be applied by providing respective
dictionaries.
    This function is an adaptation of the MATLAB implementation
by Byron Yu which was published with his paper:
Yu et al., J Neurophysiol, 2009.
Parameters
----------
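    spiketrains : list of neo.SpikeTrain
        Spike trains shown in the event-plot (raster) panel next to the
        latent trajectories. All spike trains must share the same units,
        t_start, and t_stop.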
returned_data : np.ndarray or dict
When the length of `returned_data` is one, a single np.ndarray,
containing the requested data (the first entry in `returned_data`
keys list), is returned. Otherwise, a dict of multiple np.ndarrays
with the keys identical to the data names in `returned_data` is
returned.
N-th entry of each np.ndarray is a np.ndarray of the following
shape, specific to each data type, containing the corresponding
data for the n-th trial:
`latent_variable_orth`: (#latent_vars, #bins) np.ndarray
`latent_variable`: (#latent_vars, #bins) np.ndarray
`y`: (#units, #bins) np.ndarray
`Vsm`: (#latent_vars, #latent_vars, #bins) np.ndarray
`VsmGP`: (#bins, #bins, #latent_vars) np.ndarray
Note that the num. of bins (#bins) can vary across trials,
reflecting the trial durations in the given `spiketrains` data.
gpfa_instance : GPFA
Instance of the GPFA() class in elephant, which was used to obtain
returned_data.
dimensions : list of int, optional
List specifying the indices of the dimensions to use for the
2D or 3D plot.
Default: [0, 1]
speed : float, optional
        The fraction of a GPFA time bin by which the trajectory advances per
        animation frame; smaller values yield a smoother animation.
Default: 0.2
orthonormalized_dimensions : bool, optional
Boolean which specifies whether to plot the orthonormalized latent
state space dimension corresponding to the entry 'latent_variable_orth'
in returned data (True) or the unconstrained dimension corresponding
to the entry 'latent_variable' (False).
Beware that the unconstrained state space dimensions 'latent_variable'
are not ordered by their explained variance. These dimensions each
represent one Gaussian process timescale $\tau$.
On the contrary, the orthonormalized dimensions 'latent_variable_orth'
are ordered by decreasing explained variance, allowing a similar
intuitive interpretation to the dimensions obtained in a PCA. Due to
the orthonormalization, these dimensions reflect mixtures of
timescales.
Default: True
n_trials_to_plot : int, optional
Number of single trial trajectories to plot. If zero, no single trial
trajectories will be shown.
Default: 20
trial_grouping_dict : dict or None, optional
Dictionary which specifies the groups of trials which belong together
(e.g. due to same trial type). Each item specifies one group: its
key defines the group name (which appears in the legend) and the
corresponding value is a list or np.ndarray of trial IDs.
Default: None
colors : str or list of str, optional
List of strings specifying the colors of the different trial groups.
The length of this list should correspond to the number of items
in trial_grouping_dict. In case a string is given, all trials will
share the same color unless `trial_grouping_dict` is specified, in
which case colors will be set automatically to correspond to individual
groups.
Default: 'grey'
plot_group_averages : bool, optional
If True, trajectories of those trials belonging together specified
in the trial_grouping_dict are averaged and plotted.
Default: False
hide_irrelevant_neurons : bool, optional
If True, neural activity will be shaded according to the influence
of a neuron on the chosen latent `dimensions`. The influence is
estimated as a normalized L1-norm of the columns of the pseudo-inverse
of `Corth` matrix:
.. math::
X \approx C_{\text{orth}}^{\dagger} Y
        where :math:`Y` is the matrix of (zero-mean) neuronal firing rates
        estimated from spikes, and :math:`X` the matrix of latent variables.
Default: False
plot_args_single : dict, optional
Arguments dictionary passed to ax.plot() of the single trajectories.
plot_args_average : dict, optional
Arguments dictionary passed to ax.plot() of the average trajectories.
plot_args_marker_start : dict, optional
Arguments dictionary passed to ax.plot() for the marker of the
average trajectory start.
eventplot_kwargs : dict, optional
Arguments dictionary passed to ``plt.eventplot()``.
Default: {}
slider_kwargs : dict, optional
Arguments dictionary for a slider passed to ``ax.axvline()``.
Default: {}
animation_kwargs : dict, optional
Arguments dictionary passed to ``animation.FuncAnimation()``.
figure_kwargs : dict, optional
Arguments dictionary passed to ``plt.figure()``.
Default: {}
Returns
-------
fig : matplotlib.figure.Figure
axes : matplotlib.axes.Axes
spikeplay : matplotlib.animation.FuncAnimation
Matplotlib animation that can be saved in a GIF or a video file.
.. code-block:: python
import matplotlib.animation as animation
spikeplay.save("gpfa.gif")
writergif = animation.FFMpegWriter(fps=60)
spikeplay.save("gpfa.mov", writer=writergif)
"""
# Input spiketrains that were binned must share the same t_start and t_stop
check_neo_consistency(spiketrains, object_type=neo.SpikeTrain)
units = spiketrains[0].units
t_start = spiketrains[0].t_start.item()
# prepare the input
projection, n_dimensions = _check_dimensions(gpfa_instance, dimensions)
data = _check_input_data(returned_data, orthonormalized_dimensions)
if trial_grouping_dict is None:
trial_grouping_dict = {}
colors = _check_colors(colors, trial_grouping_dict, n_trials=data.shape[0])
# infer n_trial from shape of the data
n_trials = data.shape[0]
n_trials_to_plot = min(n_trials, n_trials_to_plot)
# initialize figure and axis
fig = plt.figure(**figure_kwargs)
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2, projection=projection, aspect='auto',
title="GPFA latent trajectories")
if hide_irrelevant_neurons:
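        # Influence of each neuron on the chosen latent dimensions: the
        # pseudo-inverse of Corth maps (zero-mean) firing rates back onto the
        # latent variables, so the column-wise L1-norm over the rows selected
        # by `dimensions` measures each neuron's contribution. The values are
        # normalized by their maximum and used as the eventplot alpha below.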
Corth = gpfa_instance.params_estimated['Corth']
Corth_inv = np.linalg.pinv(Corth)
l1_norm = np.linalg.norm(Corth_inv[dimensions], ord=1, axis=0)
l1_norm /= l1_norm.max()
# TODO Use efficient vectorized eventplot call once
# https://github.com/matplotlib/matplotlib/issues/19376 is resolved.
for st_id, st in enumerate(spiketrains):
ax1.eventplot(st.magnitude, alpha=l1_norm[st_id],
lineoffsets=st_id + 1, **eventplot_kwargs)
else:
ax1.eventplot([st.magnitude for st in spiketrains], **eventplot_kwargs)
ax1.set_yticks([0, len(spiketrains) - 1])
ax1.set_ylabel("Neuron")
ax1.yaxis.set_label_coords(-0.02, 0.5)
ax1.set_xlabel(f"Time ({units.dimensionality})")
ymin, ymax = ax1.get_ylim()
slider = ax1.axvline(x=t_start, ymin=ymin, ymax=ymax, **slider_kwargs)
bin_size = gpfa_instance.bin_size.rescale(units).item()
empty_data = [[]] * n_dimensions
lines_trials = []
for trial_idx in range(n_trials_to_plot):
dat = data[trial_idx][dimensions, :]
key_id, trial_type = _get_trial_type(trial_grouping_dict,
trial_idx)
ax2.plot(*dat, alpha=0) # to setup the plot limits
color = colors[key_id]
line, = ax2.plot(*empty_data,
color=color,
label=trial_type,
**plot_args_single)
lines_trials.append(line)
lines_groups = []
group_averages = []
if plot_group_averages:
for color, trial_type in zip(colors, trial_grouping_dict.keys()):
group_average = data[trial_grouping_dict[trial_type]].mean()
group_average = group_average[dimensions, :]
group_averages.append(group_average)
line, = ax2.plot(*empty_data,
color=color,
label=trial_type,
**plot_args_average)
lines_groups.append(line)
ax2.plot(*group_average[:, 0],
color=color,
**plot_args_marker_start)
_set_axis_labels_trajectories(ax2,
orthonormalized_dimensions,
dimensions)
_show_unique_legend(axes=ax2)
plt.tight_layout()
def interpolate(data_orig, iteration):
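        # `iteration` is a fractional bin index: keep all fully elapsed bins
        # and append one point linearly interpolated between the previous and
        # the next bin, so the trajectory grows smoothly between time bins.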
bin_id = int(iteration)
residual = iteration - bin_id
data = data_orig[:, :bin_id]
if bin_id < data_orig.shape[1]:
# append an intermediate point
vec = data_orig[:, bin_id] - data_orig[:, bin_id - 1]
data = np.c_[data,
data_orig[:, bin_id - 1] + vec * residual]
return data
def line_set_data(line, data):
line.set_data(data[0, :], data[1, :])
if n_dimensions == 3:
line.set_3d_properties(data[2, :])
def animate(iteration):
slider.set_xdata(iteration * bin_size + t_start)
bin_id = int(iteration)
if bin_id == 0:
# The first bin dynamics cannot be interpolated due to the
# absence of previous bin dynamics.
return slider,
for data_trial, line in zip(data, lines_trials):
data_trial = interpolate(data_trial[dimensions],
iteration=iteration)
line_set_data(line, data_trial)
for group_average, line in zip(group_averages, lines_groups):
group_average = interpolate(group_average, iteration=iteration)
line_set_data(line, group_average)
artists = [slider, *lines_trials, *lines_groups]
return artists
# GPFA implementation allows different n_bins. So does viziphant.
n_time_bins = gpfa_instance.transform_info['num_bins'].max()
time_steps = np.arange(speed, n_time_bins + speed, speed)
interval = speed * gpfa_instance.bin_size.rescale('ms').item()
spikeplay = animation.FuncAnimation(fig, animate, frames=time_steps,
interval=interval, **animation_kwargs)
return fig, [ax1, ax2], spikeplay
def _check_input_data(returned_data, orthonormalized_dimensions):
# by default returned_data is an array containing the
# orthonormalized posterior mean of latent variable
if isinstance(returned_data, np.ndarray):
return returned_data
if isinstance(returned_data, dict):
if orthonormalized_dimensions:
return returned_data['latent_variable_orth']
if 'latent_variable' in returned_data.keys():
return returned_data['latent_variable']
raise ValueError("The latent variables before "
"orthonormalization 'latent_variable' are not in the "
"returned data")
def _check_colors(colors, trial_grouping_dict, n_trials):
if trial_grouping_dict:
if isinstance(colors, str) or len(colors) != len(trial_grouping_dict):
colors = [f'C{i}' for i in range(len(trial_grouping_dict))]
elif isinstance(colors, str):
colors = [colors] * n_trials
return colors
def _check_dimensions(gpfa_instance, dimensions):
n_dimensions = len(dimensions)
if gpfa_instance.x_dim < n_dimensions:
raise ValueError(f"GPFA trajectories dimensionality "
f"({gpfa_instance.x_dim}) is lower than the "
f"requested ({n_dimensions})")
if n_dimensions not in (2, 3):
raise ValueError("Pick only 2 or 3 dimensions to visualize.")
projection = None if n_dimensions == 2 else '3d'
return projection, n_dimensions
def _get_trial_type(trial_grouping_dict, trial_idx):
for key_id, (trial_type, trial_ids) in enumerate(
trial_grouping_dict.items()):
if trial_idx in trial_ids:
return key_id, trial_type
return 0, None
def _set_title_dimensions_vs_time(ax,
latent_variable_idx,
orthonormalized_dimensions,
data,
gpfa_instance):
if orthonormalized_dimensions:
title = r'$\tilde{{\mathbf{{x}}}}_{{{},:}}$'.format(
latent_variable_idx)
# percentage of variance of the dimensionality reduced data
# that is explained by this latent variable
variances = np.var(np.hstack(data), axis=1)
total_variance = np.sum(variances)
explained_variance = variances[latent_variable_idx] / total_variance
title = title + f'% exp. var.: {explained_variance * 100:.2f} %'
else:
title = r"${{\mathbf{{x}}}}_{{{},:}}$".format(latent_variable_idx)
# time scale of the gaussian process associated to this latent variable
gamma = gpfa_instance.params_estimated['gamma'][latent_variable_idx]
GP_time_scale = np.round(gpfa_instance.bin_size / np.sqrt(gamma), 2)
title = title + rf'$\tau$: {GP_time_scale}'
ax.set_title(title, fontsize=16)
def _set_axis_labels_trajectories(ax,
orthonormalized_dimensions,
dimensions):
if orthonormalized_dimensions:
str1 = rf"$\tilde{{\mathbf{{x}}}}_{{{dimensions[0]}}}$"
str2 = rf"$\tilde{{\mathbf{{x}}}}_{{{dimensions[1]}}}$"
if len(dimensions) == 3:
str3 = rf"$\tilde{{\mathbf{{x}}}}_{{{dimensions[2]}}}$"
else:
str1 = rf"${{\mathbf{{x}}}}_{{{dimensions[0]}}}$"
str2 = rf"${{\mathbf{{x}}}}_{{{dimensions[1]}}}$"
if len(dimensions) == 3:
str3 = rf"${{\mathbf{{x}}}}_{{{dimensions[2]}}}$"
ax.set_xlabel(str1, fontsize=16)
ax.set_ylabel(str2, fontsize=16)
if len(dimensions) == 3:
ax.set_zlabel(str3, fontsize=16)
def _get_event_times_and_labels(block_with_cut_trials,
trial_idx,
neo_event_name,
relevant_events,
gpfa_instance):
trial_events = block_with_cut_trials.segments[trial_idx].filter(
objects='Event',
name=neo_event_name)[0]
# get mask for the relevant events
mask = np.zeros(trial_events.array_annotations['trial_event_labels'].shape,
dtype='bool')
for event in relevant_events:
mask = np.logical_or(
mask,
trial_events.array_annotations['trial_event_labels'] == event)
# cheating by converting event times to binned spiketrain
t_start = block_with_cut_trials.segments[trial_idx].t_start
t_stop = block_with_cut_trials.segments[trial_idx].t_stop
event_spiketrain = neo.SpikeTrain(trial_events.times[mask],
t_start=t_start,
t_stop=t_stop)
bin_size = gpfa_instance.bin_size
binned_event_spiketrain = BinnedSpikeTrain(
event_spiketrain,
bin_size=bin_size).to_array().flatten()
time_bins_with_relevant_event = np.nonzero(binned_event_spiketrain)[0]
relevant_event_labels = \
        trial_events.array_annotations['trial_event_labels'][mask]
return time_bins_with_relevant_event, relevant_event_labels
def _show_unique_legend(axes):
# only plot unique labels
handles, labels = axes.get_legend_handles_labels()
if len(handles) == 0:
# no labels have been provided
return
by_label = dict(zip(labels, handles))
axes.legend(by_label.values(), by_label.keys())
|
INM-6/viziphant
|
viziphant/gpfa.py
|
Python
|
bsd-3-clause
| 41,325
|
[
"Gaussian",
"NEURON"
] |
17455cdc666848f18e4cc15e8e548b229cd336334bf0ce6868f93a497915b316
|
"""
https://leetcode.com/problems/subdomain-visit-count/
https://leetcode.com/submissions/detail/148073410/
"""
class Solution:
def subdomainVisits(self, cpdomains):
"""
:type cpdomains: List[str]
:rtype: List[str]
"""
dic = dict()
for cpdomain in cpdomains:
[count, domain] = cpdomain.split(' ')
count = int(count)
domains = domain.split('.')
domain = ''
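            # accumulate this record's count for every suffix of the domain,
            # e.g. "discuss.leetcode.com", "leetcode.com" and "com"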
for index in range(len(domains)):
domain = '.'.join(domains[index:])
if (domain not in dic):
dic[domain] = 0
dic[domain] += count
ans = []
for domain in dic:
ans.append(str(dic[domain]) + ' ' + domain)
return ans
import unittest
class Test(unittest.TestCase):
def test(self):
solution = Solution()
self.assertEqual(solution.subdomainVisits(["9001 discuss.leetcode.com"]), [
"9001 discuss.leetcode.com", "9001 leetcode.com", "9001 com"])
if __name__ == '__main__':
unittest.main()
|
vivaxy/algorithms
|
python/problems/subdomain_visit_count.py
|
Python
|
mit
| 1,110
|
[
"VisIt"
] |
77d1638c4acf6bf735fdc6e4071c237b8e2323e30eb1b5f4d4923ebe54d69e38
|
# - coding: utf-8 -
# Copyright (C) 2008-2010 Toms Bauģis <toms.baugis at gmail.com>
# Dual licensed under the MIT or GPL Version 2 licenses.
# See http://github.com/tbaugis/hamster_experiments/blob/master/README.textile
import math
import datetime as dt
import gtk, gobject
import pango, cairo
import re
try:
import pytweener
except: # we can also live without tweener. Scene.animate will not work
pytweener = None
import colorsys
from collections import deque
if cairo.version in ('1.8.2', '1.8.4'):
# in these two cairo versions the matrix multiplication was flipped
# http://bugs.freedesktop.org/show_bug.cgi?id=19221
def cairo_matrix_multiply(matrix1, matrix2):
return matrix2 * matrix1
else:
def cairo_matrix_multiply(matrix1, matrix2):
return matrix1 * matrix2
class Colors(object):
hex_color_normal = re.compile("#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})")
hex_color_short = re.compile("#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])")
hex_color_long = re.compile("#([a-fA-F0-9]{4})([a-fA-F0-9]{4})([a-fA-F0-9]{4})")
def parse(self, color):
assert color is not None
#parse color into rgb values
if isinstance(color, basestring):
match = self.hex_color_long.match(color)
if match:
color = [int(color, 16) / 65535.0 for color in match.groups()]
else:
match = self.hex_color_normal.match(color)
if match:
color = [int(color, 16) / 255.0 for color in match.groups()]
else:
match = self.hex_color_short.match(color)
color = [int(color + color, 16) / 255.0 for color in match.groups()]
elif isinstance(color, gtk.gdk.Color):
color = [color.red / 65535.0,
color.green / 65535.0,
color.blue / 65535.0]
else:
# otherwise we assume we have color components in 0..255 range
if color[0] > 1 or color[1] > 1 or color[2] > 1:
color = [c / 255.0 for c in color]
return color
def rgb(self, color):
return [c * 255 for c in self.parse(color)]
def gdk(self, color):
c = self.parse(color)
return gtk.gdk.Color(int(c[0] * 65535.0), int(c[1] * 65535.0), int(c[2] * 65535.0))
def is_light(self, color):
# tells you if color is dark or light, so you can up or down the
# scale for improved contrast
return colorsys.rgb_to_hls(*self.rgb(color))[1] > 150
def darker(self, color, step):
# returns color darker by step (where step is in range 0..255)
hls = colorsys.rgb_to_hls(*self.rgb(color))
return colorsys.hls_to_rgb(hls[0], hls[1] - step, hls[2])
def contrast(self, color, step):
"""if color is dark, will return a lighter one, otherwise darker"""
hls = colorsys.rgb_to_hls(*self.rgb(color))
if self.is_light(color):
return colorsys.hls_to_rgb(hls[0], hls[1] - step, hls[2])
else:
return colorsys.hls_to_rgb(hls[0], hls[1] + step, hls[2])
Colors = Colors() # this is a static class, so an instance will do
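# Example usage (illustrative; the values follow from the parsing code above):
#   Colors.parse("#ff0000")   -> [1.0, 0.0, 0.0]
#   Colors.parse((255, 0, 0)) -> [1.0, 0.0, 0.0]
#   Colors.rgb("#ff0000")     -> [255.0, 0.0, 0.0]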
class Graphics(object):
"""If context is given upon contruction, will perform drawing
operations on context instantly. Otherwise queues up the drawing
instructions and performs them in passed-in order when _draw is called
with context.
Most of instructions are mapped to cairo functions by the same name.
Where there are differences, documenation is provided.
See http://cairographics.org/documentation/pycairo/2/reference/context.html
for detailed description of the cairo drawing functions.
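    Example (a minimal sketch; the calls below are only queued here and get
    executed later, when the owning sprite is drawn)::
        graphics = Graphics()
        graphics.set_line_style(width = 2)
        graphics.rectangle(0, 0, 100, 50, corner_radius = 5)
        graphics.fill_stroke("#eee", "#333")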
"""
def __init__(self, context = None):
self.context = context
self.colors = Colors # pointer to the color utilities instance
self.extents = None # bounds of the object, only if interactive
self.paths = None # paths for mouse hit checks
self._last_matrix = None
self.__new_instructions = [] # instruction set until it is converted into path-based instructions
self.__instruction_cache = []
self.cache_surface = None
self._cache_layout = None
def clear(self):
"""clear all instructions"""
self.__new_instructions = []
self.__instruction_cache = []
self.paths = []
@staticmethod
def _stroke(context): context.stroke()
def stroke(self, color = None, alpha = 1):
if color or alpha < 1:self.set_color(color, alpha)
self._add_instruction(self._stroke,)
@staticmethod
def _fill(context): context.fill()
def fill(self, color = None, alpha = 1):
if color or alpha < 1:self.set_color(color, alpha)
self._add_instruction(self._fill,)
@staticmethod
def _mask(context, pattern): context.mask(pattern)
def mask(self, pattern):
self._add_instruction(self._mask, pattern)
@staticmethod
def _stroke_preserve(context): context.stroke_preserve()
def stroke_preserve(self, color = None, alpha = 1):
if color or alpha < 1:self.set_color(color, alpha)
self._add_instruction(self._stroke_preserve,)
@staticmethod
def _fill_preserve(context): context.fill_preserve()
def fill_preserve(self, color = None, alpha = 1):
if color or alpha < 1:self.set_color(color, alpha)
self._add_instruction(self._fill_preserve,)
@staticmethod
def _new_path(context): context.new_path()
def new_path(self):
self._add_instruction(self._new_path,)
@staticmethod
def _paint(context): context.paint()
def paint(self):
self._add_instruction(self._paint,)
@staticmethod
def _set_font_face(context, face): context.set_font_face(face)
def set_font_face(self, face):
self._add_instruction(self._set_font_face, face)
@staticmethod
def _set_font_size(context, size): context.set_font_size(size)
def set_font_size(self, size):
self._add_instruction(self._set_font_size, size)
@staticmethod
def _set_source(context, image):
context.set_source(image)
def set_source(self, image, x = 0, y = 0):
self._add_instruction(self._set_source, image)
@staticmethod
def _set_source_surface(context, surface, x, y):
context.set_source_surface(surface, x, y)
def set_source_surface(self, surface, x = 0, y = 0):
self._add_instruction(self._set_source_surface, surface, x, y)
@staticmethod
def _set_source_pixbuf(context, pixbuf, x, y):
context.set_source_pixbuf(pixbuf, x, y)
def set_source_pixbuf(self, pixbuf, x = 0, y = 0):
self._add_instruction(self._set_source_pixbuf, pixbuf, x, y)
@staticmethod
def _save_context(context): context.save()
def save_context(self):
self._add_instruction(self._save_context)
@staticmethod
def _restore_context(context): context.restore()
def restore_context(self):
self._add_instruction(self._restore_context)
@staticmethod
def _clip(context): context.clip()
def clip(self):
self._add_instruction(self._clip)
@staticmethod
def _translate(context, x, y): context.translate(x, y)
def translate(self, x, y):
self._add_instruction(self._translate, x, y)
@staticmethod
def _rotate(context, radians): context.rotate(radians)
def rotate(self, radians):
self._add_instruction(self._rotate, radians)
@staticmethod
def _move_to(context, x, y): context.move_to(x, y)
def move_to(self, x, y):
self._add_instruction(self._move_to, x, y)
@staticmethod
def _line_to(context, x, y): context.line_to(x, y)
def line_to(self, x, y = None):
if y is not None:
self._add_instruction(self._line_to, x, y)
elif isinstance(x, list) and y is None:
for x2, y2 in x:
self._add_instruction(self._line_to, x2, y2)
@staticmethod
def _rel_line_to(context, x, y): context.rel_line_to(x, y)
def rel_line_to(self, x, y = None):
if x is not None and y is not None:
self._add_instruction(self._rel_line_to, x, y)
elif isinstance(x, list) and y is None:
for x2, y2 in x:
self._add_instruction(self._rel_line_to, x2, y2)
@staticmethod
def _curve_to(context, x, y, x2, y2, x3, y3):
context.curve_to(x, y, x2, y2, x3, y3)
def curve_to(self, x, y, x2, y2, x3, y3):
"""draw a curve. (x2, y2) is the middle point of the curve"""
self._add_instruction(self._curve_to, x, y, x2, y2, x3, y3)
@staticmethod
def _close_path(context): context.close_path()
def close_path(self):
self._add_instruction(self._close_path,)
@staticmethod
def _set_line_width(context, width):
context.set_line_width(width)
@staticmethod
def _set_dash(context, dash, dash_offset = 0):
context.set_dash(dash, dash_offset)
def set_line_style(self, width = None, dash = None, dash_offset = 0):
"""change width and dash of a line"""
if width is not None:
self._add_instruction(self._set_line_width, width)
if dash is not None:
self._add_instruction(self._set_dash, dash, dash_offset)
def _set_color(self, context, r, g, b, a):
if a < 1:
context.set_source_rgba(r, g, b, a)
else:
context.set_source_rgb(r, g, b)
def set_color(self, color, alpha = 1):
"""set active color. You can use hex colors like "#aaa", or you can use
normalized RGB tripplets (where every value is in range 0..1), or
you can do the same thing in range 0..65535.
also consider skipping this operation and specify the color on stroke and
fill.
"""
color = self.colors.parse(color) # parse whatever we have there into a normalized triplet
if len(color) == 4 and alpha is None:
alpha = color[3]
r, g, b = color[:3]
self._add_instruction(self._set_color, r, g, b, alpha)
@staticmethod
def _arc(context, x, y, radius, start_angle, end_angle):
context.arc(x, y, radius, start_angle, end_angle)
def arc(self, x, y, radius, start_angle, end_angle):
"""draw arc going counter-clockwise from start_angle to end_angle"""
self._add_instruction(self._arc, x, y, radius, start_angle, end_angle)
def circle(self, x, y, radius):
"""draw circle"""
self._add_instruction(self._arc, x, y, radius, 0, math.pi * 2)
def ellipse(self, x, y, width, height, edges = None):
"""draw 'perfect' ellipse, opposed to squashed circle. works also for
equilateral polygons"""
# the automatic edge case is somewhat arbitrary
steps = edges or max((32, width, height)) / 2
angle = 0
step = math.pi * 2 / steps
points = []
while angle < math.pi * 2:
points.append((width / 2.0 * math.cos(angle),
height / 2.0 * math.sin(angle)))
angle += step
min_x = min((point[0] for point in points))
min_y = min((point[1] for point in points))
self.move_to(points[0][0] - min_x + x, points[0][1] - min_y + y)
for p_x, p_y in points:
self.line_to(p_x - min_x + x, p_y - min_y + y)
self.line_to(points[0][0] - min_x + x, points[0][1] - min_y + y)
@staticmethod
def _arc_negative(context, x, y, radius, start_angle, end_angle):
context.arc_negative(x, y, radius, start_angle, end_angle)
def arc_negative(self, x, y, radius, start_angle, end_angle):
"""draw arc going clockwise from start_angle to end_angle"""
self._add_instruction(self._arc_negative, x, y, radius, start_angle, end_angle)
@staticmethod
def _rounded_rectangle(context, x, y, x2, y2, corner_radius):
half_corner = corner_radius / 2
context.move_to(x + corner_radius, y)
context.line_to(x2 - corner_radius, y)
context.curve_to(x2 - half_corner, y, x2, y + half_corner, x2, y + corner_radius)
context.line_to(x2, y2 - corner_radius)
context.curve_to(x2, y2 - half_corner, x2 - half_corner, y2, x2 - corner_radius, y2)
context.line_to(x + corner_radius, y2)
context.curve_to(x + half_corner, y2, x, y2 - half_corner, x, y2 - corner_radius)
context.line_to(x, y + corner_radius)
context.curve_to(x, y + half_corner, x + half_corner, y, x + corner_radius, y)
@staticmethod
def _rectangle(context, x, y, w, h): context.rectangle(x, y, w, h)
def rectangle(self, x, y, width, height, corner_radius = 0):
"draw a rectangle. if corner_radius is specified, will draw rounded corners"
if corner_radius <= 0:
self._add_instruction(self._rectangle, x, y, width, height)
return
# make sure that w + h are larger than 2 * corner_radius
corner_radius = min(corner_radius, min(width, height) / 2)
x2, y2 = x + width, y + height
self._add_instruction(self._rounded_rectangle, x, y, x2, y2, corner_radius)
def fill_area(self, x, y, width, height, color, opacity = 1):
"""fill rectangular area with specified color"""
self.rectangle(x, y, width, height)
self.fill(color, opacity)
def fill_stroke(self, fill = None, stroke = None, line_width = None):
"""fill and stroke the drawn area in one go"""
if line_width: self.set_line_style(line_width)
if fill and stroke:
self.fill_preserve(fill)
elif fill:
self.fill(fill)
if stroke:
self.stroke(stroke)
@staticmethod
def _show_layout(context, layout, text, font_desc, alignment, width, wrap, ellipsize):
layout.set_font_description(font_desc)
layout.set_markup(text)
layout.set_width(int(width or -1))
layout.set_alignment(alignment)
if width > 0:
if wrap is not None:
layout.set_wrap(wrap)
else:
layout.set_ellipsize(ellipsize or pango.ELLIPSIZE_END)
context.show_layout(layout)
def create_layout(self, size = None):
"""utility function to create layout with the default font. Size and
alignment parameters are shortcuts to according functions of the
pango.Layout"""
if not self.context:
# TODO - this is rather sloppy as far as exception goes
# should explain better
raise "Can not create layout without existing context!"
layout = self.context.create_layout()
font_desc = pango.FontDescription(gtk.Style().font_desc.to_string())
if size: font_desc.set_size(size * pango.SCALE)
layout.set_font_description(font_desc)
return layout
def show_label(self, text, size = None, color = None):
"""display text with system's default font"""
font_desc = pango.FontDescription(gtk.Style().font_desc.to_string())
if color: self.set_color(color)
if size: font_desc.set_size(size * pango.SCALE)
self.show_layout(text, font_desc)
@staticmethod
def _show_text(context, text): context.show_text(text)
def show_text(self, text):
self._add_instruction(self._show_text, text)
@staticmethod
def _text_path(context, text): context.text_path(text)
def text_path(self, text):
"""this function is most likely to change"""
self._add_instruction(self._text_path, text)
def show_layout(self, text, font_desc, alignment = pango.ALIGN_LEFT, width = -1, wrap = None, ellipsize = None):
"""display text. font_desc is string of pango font description
often handier than calling this function directly, is to create
a class:Label object
"""
layout = self._cache_layout = self._cache_layout or gtk.gdk.CairoContext(cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0))).create_layout()
self._add_instruction(self._show_layout, layout, text, font_desc, alignment, width, wrap, ellipsize)
def _add_instruction(self, function, *params):
if self.context:
function(self.context, *params)
else:
self.paths = None
self.__new_instructions.append((function, params))
def _draw(self, context, opacity):
"""draw accumulated instructions in context"""
# if we have been moved around, we should update bounds
fresh_draw = self.__new_instructions and len(self.__new_instructions) > 0
if fresh_draw: #new stuff!
self.paths = []
self.__instruction_cache = self.__new_instructions
self.__new_instructions = []
else:
if not self.__instruction_cache:
return
for instruction, args in self.__instruction_cache:
if fresh_draw and instruction in (self._new_path, self._stroke, self._fill, self._clip):
self.paths.append(context.copy_path())
if opacity < 1 and instruction == self._set_color:
self._set_color(context, args[0], args[1], args[2], args[3] * opacity)
elif opacity < 1 and instruction == self._paint:
context.paint_with_alpha(opacity)
else:
instruction(context, *args)
def _draw_as_bitmap(self, context, opacity):
"""
instead of caching paths, this function caches the whole drawn thing
use cache_as_bitmap on sprite to enable this mode
"""
matrix = context.get_matrix()
matrix_changed = matrix != self._last_matrix
new_instructions = len(self.__new_instructions) > 0
if new_instructions or matrix_changed:
if new_instructions:
self.__instruction_cache = list(self.__new_instructions)
self.__new_instructions = deque()
self.paths = deque()
self.extents = None
if not self.__instruction_cache:
# no instructions - nothing to do
return
# instructions that end path
path_end_instructions = (self._new_path, self._clip, self._stroke, self._fill, self._stroke_preserve, self._fill_preserve)
# measure the path extents so we know the size of cache surface
# also to save some time use the context to paint for the first time
extents = gtk.gdk.Rectangle()
for instruction, args in self.__instruction_cache:
if instruction in path_end_instructions:
self.paths.append(context.copy_path())
exts = context.path_extents()
exts = gtk.gdk.Rectangle(int(exts[0]), int(exts[1]),
int(exts[2]-exts[0]), int(exts[3]-exts[1]))
if extents.width and extents.height:
extents = extents.union(exts)
else:
extents = exts
if instruction in (self._set_source_pixbuf, self._set_source_surface):
# draw a rectangle around the pathless instructions so that the extents are correct
pixbuf = args[0]
x = args[1] if len(args) > 1 else 0
y = args[2] if len(args) > 2 else 0
self._rectangle(context, x, y, pixbuf.get_width(), pixbuf.get_height())
                    self._clip(context)
if instruction == self._paint and opacity < 1:
context.paint_with_alpha(opacity)
elif instruction == self._set_color and opacity < 1:
self._set_color(context, args[0], args[1], args[2], args[3] * opacity)
else:
instruction(context, *args)
# avoid re-caching if we have just moved
just_transforms = new_instructions == False and \
matrix and self._last_matrix \
and all([matrix[i] == self._last_matrix[i] for i in range(4)])
# TODO - this does not look awfully safe
extents.x += matrix[4]
extents.y += matrix[5]
self.extents = extents
if not just_transforms:
# now draw the instructions on the caching surface
w = int(extents.width) + 1
h = int(extents.height) + 1
self.cache_surface = context.get_target().create_similar(cairo.CONTENT_COLOR_ALPHA, w, h)
ctx = gtk.gdk.CairoContext(cairo.Context(self.cache_surface))
ctx.translate(-extents.x, -extents.y)
ctx.transform(matrix)
for instruction, args in self.__instruction_cache:
instruction(ctx, *args)
self._last_matrix = matrix
else:
context.save()
context.identity_matrix()
context.translate(self.extents.x, self.extents.y)
context.set_source_surface(self.cache_surface)
if opacity < 1:
context.paint_with_alpha(opacity)
else:
context.paint()
context.restore()
class Sprite(gtk.Object):
"""The Sprite class is a basic display list building block: a display list
node that can display graphics and can also contain children.
    Once you have created the sprite, use Scene's add_child to add it to
    the scene.
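    Example (a minimal sketch; assumes a Scene instance named ``scene``
    already exists)::
        sprite = Sprite(x = 10, y = 10, interactive = True)
        sprite.graphics.rectangle(0, 0, 50, 50)
        sprite.graphics.fill("#f00")
        scene.add_child(sprite)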
"""
__gsignals__ = {
"on-mouse-over": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"on-mouse-out": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"on-mouse-down": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-mouse-up": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-click": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-drag-start": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-drag": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-drag-finish": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-render": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ())
}
transformation_attrs = set(('x', 'y', 'rotation', 'scale_x', 'scale_y', 'pivot_x', 'pivot_y'))
visibility_attrs = set(('opacity', 'visible', 'z_order'))
cache_attrs = set(('_stroke_context', '_matrix', '_prev_parent_matrix', '_extents', '_scene'))
graphics_unrelated_attrs = set(('drag_x', 'drag_y', 'sprites', 'mouse_cursor', '_sprite_dirty'))
def __init__(self, x = 0, y = 0,
opacity = 1, visible = True,
rotation = 0, pivot_x = 0, pivot_y = 0,
scale_x = 1, scale_y = 1,
interactive = False, draggable = False,
z_order = 0, mouse_cursor = None,
cache_as_bitmap = False, snap_to_pixel = True):
gtk.Object.__init__(self)
self._scene = None
#: list of children sprites. Use :func:`add_child` to add sprites
self.sprites = []
#: instance of :ref:`graphics` for this sprite
self.graphics = Graphics()
#: boolean denoting whether the sprite responds to mouse events
self.interactive = interactive
#: boolean marking if sprite can be automatically dragged
self.draggable = draggable
#: relative x coordinate of the sprites' rotation point
self.pivot_x = pivot_x
#: relative y coordinates of the sprites' rotation point
self.pivot_y = pivot_y
#: sprite opacity
self.opacity = opacity
#: boolean visibility flag
self.visible = visible
#: pointer to parent :class:`Sprite` or :class:`Scene`
self.parent = None
#: sprite coordinates
self.x, self.y = x, y
#: rotation of the sprite in radians (use :func:`math.degrees` to convert to degrees if necessary)
self.rotation = rotation
#: scale X
self.scale_x = scale_x
#: scale Y
self.scale_y = scale_y
#: drawing order between siblings. The one with the highest z_order will be on top.
self.z_order = z_order
#: mouse-over cursor of the sprite. See :meth:`Scene.mouse_cursor`
#: for possible values
self.mouse_cursor = mouse_cursor
#: x position of the cursor within mouse upon drag. change this value
#: in on-drag-start to adjust drag point
self.drag_x = 0
#: y position of the cursor within mouse upon drag. change this value
#: in on-drag-start to adjust drag point
self.drag_y = 0
        #: Whether the sprite should be cached as a bitmap. Default: False.
#: Generally good when you have many static sprites
self.cache_as_bitmap = cache_as_bitmap
        #: Should the sprite coordinates always be rounded to full pixels. Default: True.
#: Mostly this is good for performance but in some cases that can lead
#: to rounding errors in positioning.
self.snap_to_pixel = snap_to_pixel
self.__dict__["_sprite_dirty"] = True # flag that indicates that the graphics object of the sprite should be rendered
self.__dict__["_sprite_moved"] = True # flag that indicates that the graphics object of the sprite should be rendered
self._matrix = None
self._prev_parent_matrix = None
self._extents = None
self._prev_extents = None
self._stroke_context = None
def __setattr__(self, name, val):
if self.__dict__.get(name, "hamster_graphics_no_value_really") == val:
return
self.__dict__[name] = val
# prev parent matrix walks downwards
if name == '_prev_parent_matrix' and self.visible:
self._extents = None
# downwards recursive invalidation of parent matrix
for sprite in self.sprites:
sprite._prev_parent_matrix = None
if name in self.cache_attrs or name in self.graphics_unrelated_attrs:
return
"""all the other changes influence cache vars"""
# either transforms or path operations - extents have to be recalculated
self._extents = None
if name == 'visible' and self.visible == False:
# when transforms happen while sprite is invisible
for sprite in self.sprites:
sprite._prev_parent_matrix = None
# on moves invalidate our matrix, child extent cache (as that depends on our transforms)
# as well as our parent's child extents as we moved
# then go into children and invalidate the parent matrix down the tree
if name in self.transformation_attrs:
self._matrix = None
for sprite in self.sprites:
sprite._prev_parent_matrix = None
        # if the attribute is neither a transformation nor a visibility one,
        # we conclude that the sprite needs re-rendering
if name not in self.transformation_attrs and name not in self.visibility_attrs:
self.__dict__["_sprite_dirty"] = True
# on parent change invalidate the matrix
if name == 'parent':
self._prev_parent_matrix = None
return
if name == 'opacity' and self.__dict__.get("cache_as_bitmap") and hasattr(self, "graphics"):
# invalidating cache for the bitmap version as that paints opacity in the image
self.graphics._last_matrix = None
if name == 'z_order' and self.__dict__.get('parent'):
self.parent._sort()
self.redraw()
def _sort(self):
"""sort sprites by z_order"""
self.sprites = sorted(self.sprites, key=lambda sprite:sprite.z_order)
def add_child(self, *sprites):
"""Add child sprite. Child will be nested within parent"""
for sprite in sprites:
if sprite == self:
raise Exception("trying to add sprite to itself")
if sprite.parent:
sprite.x, sprite.y = self.from_scene_coords(*sprite.to_scene_coords())
sprite.parent.remove_child(sprite)
self.sprites.append(sprite)
sprite.parent = self
self._sort()
def remove_child(self, *sprites):
for sprite in sprites:
self.sprites.remove(sprite)
sprite._scene = None
sprite.parent = None
def bring_to_front(self):
"""adjusts sprite's z-order so that the sprite is on top of it's
siblings"""
if not self.parent:
return
self.z_order = self.parent.sprites[-1].z_order + 1
def send_to_back(self):
"""adjusts sprite's z-order so that the sprite is behind it's
siblings"""
if not self.parent:
return
self.z_order = self.parent.sprites[0].z_order - 1
def get_extents(self):
"""measure the extents of the sprite's graphics. if context is provided
will use that to draw the paths"""
if self._extents:
return self._extents
if self._sprite_dirty:
# redrawing merely because we need fresh extents of the sprite
context = gtk.gdk.CairoContext(cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0)))
context.transform(self.get_matrix())
self.emit("on-render")
self.__dict__["_sprite_dirty"] = False
self.graphics._draw(context, 1)
if not self.graphics.paths:
self.graphics._draw(cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0)), 1)
if not self.graphics.paths:
return None
context = gtk.gdk.CairoContext(cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0)))
context.transform(self.get_matrix())
for path in self.graphics.paths:
context.append_path(path)
context.identity_matrix()
ext = context.path_extents()
ext = gtk.gdk.Rectangle(int(ext[0]), int(ext[1]),
int(ext[2] - ext[0]), int(ext[3] - ext[1]))
if not ext.width and not ext.height:
ext = None
self.__dict__['_extents'] = ext
self.__dict__['_stroke_context'] = context
return ext
def check_hit(self, x, y):
"""check if the given coordinates are inside the sprite's fill or stroke
path"""
extents = self.get_extents()
if not extents:
return False
if extents.x <= x <= extents.x + extents.width and extents.y <= y <= extents.y + extents.height:
return self._stroke_context is None or self._stroke_context.in_fill(x, y)
else:
return False
def get_scene(self):
"""returns class:`Scene` the sprite belongs to"""
if not self._scene:
if hasattr(self, 'parent') and self.parent:
if isinstance(self.parent, Sprite) == False:
scene = self.parent
else:
scene = self.parent.get_scene()
self._scene = scene
return self._scene
def redraw(self):
"""queue redraw of the sprite. this function is called automatically
whenever a sprite attribute changes. sprite changes that happen
during scene redraw are ignored in order to avoid echoes.
Call scene.redraw() explicitly if you need to redraw in these cases.
"""
scene = self.get_scene()
if scene and scene._redraw_in_progress == False and self.parent:
self.parent.redraw()
def animate(self, duration = None, easing = None, on_complete = None, on_update = None, **kwargs):
"""Request paretn Scene to Interpolate attributes using the internal tweener.
Specify sprite's attributes that need changing.
`duration` defaults to 0.4 seconds and `easing` to cubic in-out
(for others see pytweener.Easing class).
Example::
# tween some_sprite to coordinates (50,100) using default duration and easing
self.animate(x = 50, y = 100)
"""
scene = self.get_scene()
if scene:
scene.animate(self, duration, easing, on_complete, on_update, **kwargs)
else:
for key, val in kwargs.items():
setattr(self, key, val)
def get_local_matrix(self):
if not self._matrix:
self._matrix = cairo.Matrix()
if self.snap_to_pixel:
self._matrix.translate(int(self.x) + int(self.pivot_x), int(self.y) + int(self.pivot_y))
else:
self._matrix.translate(self.x + self.pivot_x, self.y + self.pivot_y)
if self.rotation:
self._matrix.rotate(self.rotation)
if self.snap_to_pixel:
self._matrix.translate(int(-self.pivot_x), int(-self.pivot_y))
else:
self._matrix.translate(-self.pivot_x, -self.pivot_y)
if self.scale_x != 1 or self.scale_y != 1:
self._matrix.scale(self.scale_x, self.scale_y)
return cairo.Matrix() * self._matrix
def get_matrix(self):
"""return sprite's current transformation matrix"""
if self.parent:
return cairo_matrix_multiply(self.get_local_matrix(),
(self._prev_parent_matrix or self.parent.get_matrix()))
else:
return self.get_local_matrix()
def from_scene_coords(self, x=0, y=0):
"""Converts x, y given in the scene coordinates to sprite's local ones
coordinates"""
matrix = self.get_matrix()
matrix.invert()
return matrix.transform_point(x, y)
def to_scene_coords(self, x=0, y=0):
"""Converts x, y from sprite's local coordinates to scene coordinates"""
return self.get_matrix().transform_point(x, y)
def _draw(self, context, opacity = 1, parent_matrix = None):
if self.visible is False:
return
if (self._sprite_dirty): # send signal to redo the drawing when sprite is dirty
self.__dict__['_extents'] = None
self.emit("on-render")
self.__dict__["_sprite_dirty"] = False
parent_matrix = parent_matrix or cairo.Matrix()
# cache parent matrix
self._prev_parent_matrix = parent_matrix
matrix = self.get_local_matrix()
context.save()
context.transform(matrix)
if self.cache_as_bitmap:
self.graphics._draw_as_bitmap(context, self.opacity * opacity)
else:
self.graphics._draw(context, self.opacity * opacity)
self.__dict__['_prev_extents'] = self._extents or self.get_extents()
for sprite in self.sprites:
sprite._draw(context, self.opacity * opacity, cairo_matrix_multiply(matrix, parent_matrix))
context.restore()
context.new_path() #forget about us
class BitmapSprite(Sprite):
"""Caches given image data in a surface similar to targets, which ensures
that drawing it will be quick and low on CPU.
Image data can be either :class:`cairo.ImageSurface` or :class:`gtk.gdk.Pixbuf`
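    Example (a minimal sketch; assumes an existing PNG file "icon.png")::
        image = cairo.ImageSurface.create_from_png("icon.png")
        bitmap = BitmapSprite(image_data = image, x = 10, y = 10)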
"""
def __init__(self, image_data = None, cache_mode = None, **kwargs):
Sprite.__init__(self, **kwargs)
self.width, self.height = None, None
self.cache_mode = cache_mode or cairo.CONTENT_COLOR_ALPHA
#: image data
self.image_data = image_data
self._surface = None
self.cache_attrs = self.cache_attrs ^ set(('_surface',))
def __setattr__(self, name, val):
Sprite.__setattr__(self, name, val)
if name == 'image_data':
self.__dict__['_surface'] = None
if self.image_data:
self.__dict__['width'] = self.image_data.get_width()
self.__dict__['height'] = self.image_data.get_height()
def _draw(self, context, opacity = 1, parent_matrix = None):
if self.image_data is None or self.width is None or self.height is None:
return
if not self._surface:
# caching image on surface similar to the target
surface = context.get_target().create_similar(self.cache_mode,
self.width,
self.height)
local_context = gtk.gdk.CairoContext(cairo.Context(surface))
if isinstance(self.image_data, gtk.gdk.Pixbuf):
local_context.set_source_pixbuf(self.image_data, 0, 0)
else:
local_context.set_source_surface(self.image_data)
local_context.paint()
# add instructions with the resulting surface
self.graphics.clear()
self.graphics.rectangle(0, 0, self.width, self.height)
self.graphics.clip()
self.graphics.set_source_surface(surface)
self.graphics.paint()
self._surface = surface
Sprite._draw(self, context, opacity, parent_matrix)
class Image(BitmapSprite):
"""Displays image by path. Currently supports only PNG images."""
def __init__(self, path, **kwargs):
BitmapSprite.__init__(self, **kwargs)
#: path to the image
self.path = path
def __setattr__(self, name, val):
BitmapSprite.__setattr__(self, name, val)
if name == 'path': # load when the value is set to avoid penalty on render
self.image_data = cairo.ImageSurface.create_from_png(self.path)
class Icon(BitmapSprite):
"""Displays icon by name and size in the theme"""
def __init__(self, name, size=24, **kwargs):
BitmapSprite.__init__(self, **kwargs)
self.theme = gtk.icon_theme_get_default()
#: icon name from theme
self.name = name
#: icon size in pixels
self.size = size
def __setattr__(self, name, val):
BitmapSprite.__setattr__(self, name, val)
if name in ('name', 'size'): # no other reason to discard cache than just on path change
if self.__dict__.get('name') and self.__dict__.get('size'):
self.image_data = self.theme.load_icon(self.name, self.size, 0)
else:
self.image_data = None
class Label(Sprite):
__gsignals__ = {
"on-change": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
def __init__(self, text = "", size = 10, color = None,
alignment = pango.ALIGN_LEFT,
max_width = None, wrap = None, ellipsize = None,
**kwargs):
Sprite.__init__(self, **kwargs)
self.width, self.height = None, None
self._test_context = gtk.gdk.CairoContext(cairo.Context(cairo.ImageSurface(cairo.FORMAT_A8, 0, 0)))
self._test_layout = self._test_context.create_layout()
#: pango.FontDescription, default is the system's font
self.font_desc = pango.FontDescription(gtk.Style().font_desc.to_string())
self.font_desc.set_size(size * pango.SCALE)
#: color of label either as hex string or an (r,g,b) tuple
self.color = color
self._bounds_width = None
#: wrapping method. Can be set to pango. [WRAP_WORD, WRAP_CHAR,
#: WRAP_WORD_CHAR]
self.wrap = wrap
#: Ellipsize mode. Can be set to pango. [ELLIPSIZE_NONE,
#: ELLIPSIZE_START, ELLIPSIZE_MIDDLE, ELLIPSIZE_END]
self.ellipsize = ellipsize
#: alignment. one of pango.[ALIGN_LEFT, ALIGN_RIGHT, ALIGN_CENTER]
self.alignment = alignment
#: font size
self.size = size
#: maximum width of the label in pixels. if specified, the label
        #: will be wrapped or ellipsized depending on the wrap and ellipsize settings
self.max_width = max_width
self.__surface = None
#: label text
self.text = text
self._measures = {}
self.connect("on-render", self.on_render)
self.cache_attrs = self.cache_attrs ^ set(("_letter_sizes", "__surface", "_ascent", "_bounds_width", "_measures"))
def __setattr__(self, name, val):
if self.__dict__.get(name, "hamster_graphics_no_value_really") != val:
if name == "width" and val and self.__dict__.get('_bounds_width') and val * pango.SCALE == self.__dict__['_bounds_width']:
return
Sprite.__setattr__(self, name, val)
if name == "width":
                # setting width means the consumer wants to constrain the label
if val is None or val == -1:
self.__dict__['_bounds_width'] = None
else:
self.__dict__['_bounds_width'] = val * pango.SCALE
if name in ("width", "text", "size", "font_desc", "wrap", "ellipsize", "max_width"):
self._measures = {}
# avoid chicken and egg
if hasattr(self, "text") and hasattr(self, "size"):
self.__dict__['width'], self.__dict__['height'] = self.measure(self.text)
if name == 'text':
self.emit('on-change')
def measure(self, text):
"""measures given text with label's font and size.
returns width, height and ascent. Ascent's null in case if the label
does not have font face specified (and is thusly using pango)"""
if text in self._measures:
return self._measures[text]
width, height = None, None
context = self._test_context
layout = self._test_layout
layout.set_font_description(self.font_desc)
layout.set_markup(text)
max_width = 0
if self.max_width:
max_width = self.max_width * pango.SCALE
layout.set_width(int(self._bounds_width or max_width or -1))
layout.set_ellipsize(pango.ELLIPSIZE_NONE)
if self.wrap is not None:
layout.set_wrap(self.wrap)
else:
layout.set_ellipsize(self.ellipsize or pango.ELLIPSIZE_END)
width, height = layout.get_pixel_size()
self._measures[text] = width, height
return self._measures[text]
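    # Illustrative use of measure() - an added, hedged usage note rather than part
    # of the original widget (assumes an existing Label instance named `label`):
    #   w, h = label.measure("Hello world")  # pixel extents with the label's font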
def on_render(self, sprite):
if not self.text:
self.graphics.clear()
return
self.graphics.set_color(self.color)
rect_width = self.width
max_width = 0
if self.max_width:
max_width = self.max_width * pango.SCALE
# when max width is specified and we are told to align in center
# do that (the pango instruction takes care of aligning within
# the lines of the text)
if self.alignment == pango.ALIGN_CENTER:
self.graphics.move_to(-(self.max_width - self.width)/2, 0)
bounds_width = max_width or self._bounds_width or -1
self.graphics.show_layout(self.text, self.font_desc,
self.alignment,
bounds_width,
self.wrap,
self.ellipsize)
if self._bounds_width:
rect_width = self._bounds_width / pango.SCALE
self.graphics.rectangle(0, 0, rect_width, self.height)
self.graphics.clip()
class Rectangle(Sprite):
def __init__(self, w, h, corner_radius = 0, fill = None, stroke = None, line_width = 1, **kwargs):
Sprite.__init__(self, **kwargs)
#: width
self.width = w
#: height
self.height = h
#: fill color
self.fill = fill
#: stroke color
self.stroke = stroke
#: stroke line width
self.line_width = line_width
#: corner radius. Set bigger than 0 for rounded corners
self.corner_radius = corner_radius
self.connect("on-render", self.on_render)
def on_render(self, sprite):
self.graphics.set_line_style(width = self.line_width)
self.graphics.rectangle(0, 0, self.width, self.height, self.corner_radius)
self.graphics.fill_stroke(self.fill, self.stroke, self.line_width)
class Polygon(Sprite):
def __init__(self, points, fill = None, stroke = None, line_width = 1, **kwargs):
Sprite.__init__(self, **kwargs)
#: list of (x,y) tuples that the line should go through. Polygon
#: will automatically close path.
self.points = points
#: fill color
self.fill = fill
#: stroke color
self.stroke = stroke
#: stroke line width
self.line_width = line_width
self.connect("on-render", self.on_render)
def on_render(self, sprite):
if not self.points: return
self.graphics.move_to(*self.points[0])
self.graphics.line_to(self.points)
self.graphics.close_path()
self.graphics.fill_stroke(self.fill, self.stroke, self.line_width)
class Circle(Sprite):
def __init__(self, width, height, fill = None, stroke = None, line_width = 1, **kwargs):
Sprite.__init__(self, **kwargs)
#: circle width
self.width = width
#: circle height
self.height = height
#: fill color
self.fill = fill
#: stroke color
self.stroke = stroke
#: stroke line width
self.line_width = line_width
self.connect("on-render", self.on_render)
def on_render(self, sprite):
if self.width == self.height:
radius = self.width / 2.0
self.graphics.circle(radius, radius, radius)
else:
self.graphics.ellipse(0, 0, self.width, self.height)
self.graphics.fill_stroke(self.fill, self.stroke, self.line_width)
class Scene(gtk.DrawingArea):
""" Drawing area for displaying sprites.
Add sprites to the Scene by calling :func:`add_child`.
Scene is descendant of `gtk.DrawingArea <http://www.pygtk.org/docs/pygtk/class-gtkdrawingarea.html>`_
    and thus inherits all its methods and everything.
"""
__gsignals__ = {
"expose-event": "override",
"configure_event": "override",
"on-enter-frame": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, )),
"on-finish-frame": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, )),
"on-click": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
"on-drag": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
"on-drag-start": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
"on-drag-finish": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
"on-mouse-move": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-mouse-down": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-mouse-up": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-mouse-over": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-mouse-out": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-scroll": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
}
def __init__(self, interactive = True, framerate = 60,
background_color = None, scale = False, keep_aspect = True):
gtk.DrawingArea.__init__(self)
if interactive:
self.set_events(gtk.gdk.POINTER_MOTION_MASK
| gtk.gdk.LEAVE_NOTIFY_MASK | gtk.gdk.ENTER_NOTIFY_MASK
| gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK
| gtk.gdk.SCROLL_MASK
| gtk.gdk.KEY_PRESS_MASK)
self.connect("motion_notify_event", self.__on_mouse_move)
self.connect("enter_notify_event", self.__on_mouse_enter)
self.connect("leave_notify_event", self.__on_mouse_leave)
self.connect("button_press_event", self.__on_button_press)
self.connect("button_release_event", self.__on_button_release)
self.connect("scroll-event", self.__on_scroll)
#: list of sprites in scene. use :func:`add_child` to add sprites
self.sprites = []
        #: framerate of animation. This limits how often a redraw will be
        #: performed (that is - no more often than the framerate). It will
        #: also influence the smoothness of tweeners.
self.framerate = framerate
#: Scene width. Will be `None` until first expose (that is until first
#: on-enter-frame signal below).
self.width = None
#: Scene height. Will be `None` until first expose (that is until first
#: on-enter-frame signal below).
self.height = None
#: instance of :class:`pytweener.Tweener` that is used by
#: :func:`animate` function, but can be also accessed directly for advanced control.
self.tweener = False
if pytweener:
self.tweener = pytweener.Tweener(0.4, pytweener.Easing.Cubic.ease_in_out)
#: instance of :class:`Colors` class for color parsing
self.colors = Colors
#: read only info about current framerate (frames per second)
self.fps = 0 # inner frames per second counter
#: Last known x position of the mouse (set on expose event)
self.mouse_x = None
#: Last known y position of the mouse (set on expose event)
self.mouse_y = None
#: Background color of the scene. Use either a string with hex color or an RGB triplet.
self.background_color = background_color
#: Mouse cursor appearance.
#: Replace with your own cursor or set to False to have no cursor.
        #: None will revert to the default behavior
self.mouse_cursor = None
blank_pixmap = gtk.gdk.Pixmap(None, 1, 1, 1)
self._blank_cursor = gtk.gdk.Cursor(blank_pixmap, blank_pixmap, gtk.gdk.Color(), gtk.gdk.Color(), 0, 0)
        #: Minimum distance in pixels for a drag to occur
self.drag_distance = 1
self._last_frame_time = None
self._mouse_sprite = None
self._drag_sprite = None
self._mouse_down_sprite = None
self.__drag_started = False
self.__drag_start_x, self.__drag_start_y = None, None
self._mouse_in = False
self.__last_cursor = None
self.__drawing_queued = False
self._redraw_in_progress = False
#: When specified, upon window resize the content will be scaled
#: relative to original window size. Defaults to False.
self.scale = scale
#: Should the stage maintain aspect ratio upon scale if
#: :attr:`Scene.scale` is enabled. Defaults to true.
self.keep_aspect = keep_aspect
self._original_width, self._original_height = None, None
def add_child(self, *sprites):
"""Add one or several :class:`Sprite` objects to the scene"""
for sprite in sprites:
if sprite == self:
raise Exception("trying to add sprite to itself")
if sprite.parent:
sprite.x, sprite.y = sprite.to_scene_coords(0, 0)
sprite.parent.remove_child(sprite)
self.sprites.append(sprite)
sprite.parent = self
self._sort()
def _sort(self):
"""sort sprites by z_order"""
self.sprites = sorted(self.sprites, key=lambda sprite:sprite.z_order)
def remove_child(self, *sprites):
"""Remove one or several :class:`Sprite` sprites from scene """
for sprite in sprites:
self.sprites.remove(sprite)
sprite._scene = None
sprite.parent = None
# these two mimic sprite functions so parent check can be avoided
def from_scene_coords(self, x, y): return x, y
def to_scene_coords(self, x, y): return x, y
def get_matrix(self): return cairo.Matrix()
def clear(self):
"""Remove all sprites from scene"""
self.remove_child(*self.sprites)
def animate(self, sprite, duration = None, easing = None, on_complete = None, on_update = None, **kwargs):
"""Interpolate attributes of the given object using the internal tweener
and redrawing scene after every tweener update.
Specify the sprite and sprite's attributes that need changing.
`duration` defaults to 0.4 seconds and `easing` to cubic in-out
(for others see pytweener.Easing class).
Redraw is requested right after creating the animation.
Example::
# tween some_sprite to coordinates (50,100) using default duration and easing
scene.animate(some_sprite, x = 50, y = 100)
"""
if not self.tweener: # here we complain
raise Exception("pytweener was not found. Include it to enable animations")
tween = self.tweener.add_tween(sprite,
duration=duration,
easing=easing,
on_complete=on_complete,
on_update=on_update,
**kwargs)
self.redraw()
return tween
def redraw(self):
"""Queue redraw. The redraw will be performed not more often than
the `framerate` allows"""
if self.__drawing_queued == False: #if we are moving, then there is a timeout somewhere already
self.__drawing_queued = True
self._last_frame_time = dt.datetime.now()
gobject.timeout_add(1000 / self.framerate, self.__redraw_loop)
def __redraw_loop(self):
"""loop until there is nothing more to tween"""
self.queue_draw() # this will trigger do_expose_event when the current events have been flushed
self.__drawing_queued = self.tweener and self.tweener.has_tweens()
return self.__drawing_queued
def do_expose_event(self, event):
context = self.window.cairo_create()
# clip to the visible part
context.rectangle(event.area.x, event.area.y,
event.area.width, event.area.height)
if self.background_color:
color = self.colors.parse(self.background_color)
context.set_source_rgb(*color)
context.fill_preserve()
context.clip()
if self.scale:
aspect_x = self.width / self._original_width
aspect_y = self.height / self._original_height
if self.keep_aspect:
aspect_x = aspect_y = min(aspect_x, aspect_y)
context.scale(aspect_x, aspect_y)
self.mouse_x, self.mouse_y, mods = self.get_window().get_pointer()
self._redraw_in_progress = True
# update tweens
now = dt.datetime.now()
delta = (now - (self._last_frame_time or dt.datetime.now())).microseconds / 1000000.0
self._last_frame_time = now
if self.tweener:
self.tweener.update(delta)
self.fps = 1 / delta
# start drawing
self.emit("on-enter-frame", context)
for sprite in self.sprites:
sprite._draw(context)
self.__check_mouse(self.mouse_x, self.mouse_y)
self.emit("on-finish-frame", context)
self._redraw_in_progress = False
def do_configure_event(self, event):
if self._original_width is None:
self._original_width = float(event.width)
self._original_height = float(event.height)
self.width, self.height = event.width, event.height
def all_visible_sprites(self):
"""Returns flat list of the sprite tree for simplified iteration"""
def all_recursive(sprites):
for sprite in sprites:
if sprite.visible:
yield sprite
if sprite.sprites:
for child in all_recursive(sprite.sprites):
yield child
return all_recursive(self.sprites)
def get_sprite_at_position(self, x, y):
"""Returns the topmost visible interactive sprite for given coordinates"""
over = None
for sprite in self.all_visible_sprites():
if (sprite.interactive or sprite.draggable) and sprite.check_hit(x, y):
over = sprite
return over
def __check_mouse(self, x, y):
if x is None or self._mouse_in == False:
return
cursor = gtk.gdk.ARROW # default
if self.mouse_cursor is not None:
cursor = self.mouse_cursor
if self._drag_sprite:
cursor = self._drag_sprite.mouse_cursor or self.mouse_cursor or gtk.gdk.FLEUR
else:
#check if we have a mouse over
over = self.get_sprite_at_position(x, y)
if self._mouse_sprite and self._mouse_sprite != over:
self._mouse_sprite.emit("on-mouse-out")
self.emit("on-mouse-out", self._mouse_sprite)
if over:
if over.mouse_cursor is not None:
cursor = over.mouse_cursor
elif self.mouse_cursor is None:
# resort to defaults
if over.draggable:
cursor = gtk.gdk.FLEUR
else:
cursor = gtk.gdk.HAND2
if over != self._mouse_sprite:
over.emit("on-mouse-over")
self.emit("on-mouse-over", over)
self._mouse_sprite = over
if cursor == False:
cursor = self._blank_cursor
if not self.__last_cursor or cursor != self.__last_cursor:
if isinstance(cursor, gtk.gdk.Cursor):
self.window.set_cursor(cursor)
else:
self.window.set_cursor(gtk.gdk.Cursor(cursor))
self.__last_cursor = cursor
""" mouse events """
def __on_mouse_move(self, area, event):
state = event.state
if self._mouse_down_sprite and self._mouse_down_sprite.draggable \
and gtk.gdk.BUTTON1_MASK & event.state:
# dragging around
if not self.__drag_started:
drag_started = (self.__drag_start_x is not None and \
(self.__drag_start_x - event.x) ** 2 + \
(self.__drag_start_y - event.y) ** 2 > self.drag_distance ** 2)
if drag_started:
self._drag_sprite = self._mouse_down_sprite
self._drag_sprite.drag_x, self._drag_sprite.drag_y = self._drag_sprite.x, self._drag_sprite.y
self._drag_sprite.emit("on-drag-start", event)
self.emit("on-drag-start", self._drag_sprite, event)
self.__drag_started = True
if self.__drag_started:
diff_x, diff_y = event.x - self.__drag_start_x, event.y - self.__drag_start_y
if isinstance(self._drag_sprite.parent, Sprite):
matrix = self._drag_sprite.parent.get_matrix()
matrix.invert()
diff_x, diff_y = matrix.transform_distance(diff_x, diff_y)
self._drag_sprite.x, self._drag_sprite.y = self._drag_sprite.drag_x + diff_x, self._drag_sprite.drag_y + diff_y
self._drag_sprite.emit("on-drag", event)
self.emit("on-drag", self._drag_sprite, event)
else:
# avoid double mouse checks - the redraw will also check for mouse!
if not self.__drawing_queued:
self.__check_mouse(event.x, event.y)
self.emit("on-mouse-move", event)
def __on_mouse_enter(self, area, event):
self._mouse_in = True
def __on_mouse_leave(self, area, event):
self._mouse_in = False
if self._mouse_sprite:
self.emit("on-mouse-out", self._mouse_sprite)
self._mouse_sprite = None
def __on_button_press(self, area, event):
target = self.get_sprite_at_position(event.x, event.y)
self.__drag_start_x, self.__drag_start_y = event.x, event.y
self._mouse_down_sprite = target
if target:
target.emit("on-mouse-down", event)
self.emit("on-mouse-down", event)
def __on_button_release(self, area, event):
target = self.get_sprite_at_position(event.x, event.y)
if target:
target.emit("on-mouse-up", event)
self.emit("on-mouse-up", event)
# trying to not emit click and drag-finish at the same time
        click = not self.__drag_started or (event.x - self.__drag_start_x) ** 2 + \
                (event.y - self.__drag_start_y) ** 2 < self.drag_distance ** 2
if (click and self.__drag_started == False) or not self._drag_sprite:
if target:
target.emit("on-click", event)
self.emit("on-click", event, target)
if self._drag_sprite:
self._drag_sprite.emit("on-drag-finish", event)
self.emit("on-drag-finish", self._drag_sprite, event)
self._drag_sprite.drag_x, self._drag_sprite.drag_y = None, None
self._drag_sprite = None
self._mouse_down_sprite = None
self.__drag_started = False
        self.__drag_start_x, self.__drag_start_y = None, None
def __on_scroll(self, area, event):
self.emit("on-scroll", event)
|
omaciel/billreminder
|
src/gui/widgets/graphics.py
|
Python
|
gpl-3.0
| 62,323
|
[
"FLEUR"
] |
36676bdfe40602374d0db458d6678fc5896dd25e5e06f4be999ff85c57f5c742
|
#! /usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
test.py [-aBbcdDfgGhLmprtTuv] [modfilter [testfilter]]
Test harness.
-a level
--all
Run the tests at the given level. Any test at a level at or below this is
run, any test at a level above this is not run. Level 0 runs all tests.
The default is to run tests at level 1. --all is a shortcut for -a 0.
-b
--build
Run "python setup.py build" before running tests, where "python"
is the version of python used to run test.py. Highly recommended.
Tests will be run from the build directory. (Note: In Python < 2.3
the -q flag is added to the setup.py command line.)
-B
Run "python setup.py build_ext -i" before running tests. Tests will be
run from the source directory.
-c use pychecker
-d
Instead of the normal test harness, run a debug version which
doesn't catch any exceptions. This is occasionally handy when the
unittest code catching the exception doesn't work right.
Unfortunately, the debug harness doesn't print the name of the
test, so Use With Care.
--dir directory
Option to limit where tests are searched for. This is
important when you *really* want to limit the code that gets run.
For example, if refactoring interfaces, you don't want to see the way
you have broken setups for tests in other packages. You *just* want to
run the interface tests.
-D
Works like -d, except that it loads pdb when an exception occurs.
-f
Run functional tests instead of unit tests.
-g threshold
Set the garbage collector generation0 threshold. This can be used to
stress memory and gc correctness. Some crashes are only reproducible when
    the threshold is set to 1 (aggressive garbage collection). Do "-g 0" to
disable garbage collection altogether.
-G gc_option
Set the garbage collection debugging flags. The argument must be one
    of the DEBUG_ flags defined by the Python gc module. Multiple options
can be specified by using "-G OPTION1 -G OPTION2."
--libdir test_root
Search for tests starting in the specified start directory
(useful for testing components being developed outside the main
"src" or "build" trees).
--keepbytecode
Do not delete all stale bytecode before running tests
-L
Keep running the selected tests in a loop. You may experience
memory leakage.
-t
Time the individual tests and print a list of the top 50, sorted from
longest to shortest.
-p
Show running progress. It can be combined with -v or -vv.
-r
Look for refcount problems.
This requires that Python was built --with-pydebug.
-T
Use the trace module from Python for code coverage. XXX This only works
if trace.py is explicitly added to PYTHONPATH. The current utility writes
coverage files to a directory named `coverage' that is parallel to
`build'. It also prints a summary to stdout.
-v
Verbose output. With one -v, unittest prints a dot (".") for each test
run. With -vv, unittest prints the name of each test (for some definition
of "name" ...). With no -v, unittest is silent until the end of the run,
except when errors occur.
    When -p is also specified, the meaning of -v is slightly changed. With
-p and no -v only the percent indicator is displayed. With -p and -v
the test name of the current test is shown to the right of the percent
indicator. With -p and -vv the test name is not truncated to fit into
80 columns and it is not cleared after the test finishes.
-u
-m
Use the PyUnit GUI instead of output to the command line. The GUI imports
tests on its own, taking care to reload all dependencies on each run. The
debug (-d), verbose (-v), progress (-p), and Loop (-L) options will be
ignored. The testfilter filter is also not applied.
-m starts the gui minimized. Double-clicking the progress bar will start
the import and run all tests.
modfilter
testfilter
Case-sensitive regexps to limit which tests are run, used in search
(not match) mode.
In an extension of Python regexp notation, a leading "!" is stripped
and causes the sense of the remaining regexp to be negated (so "!bc"
matches any string that does not match "bc", and vice versa).
By default these act like ".", i.e. nothing is excluded.
modfilter is applied to a test file's path, starting at "build" and
including (OS-dependent) path separators.
testfilter is applied to the (method) name of the unittest methods
contained in the test files whose paths modfilter matched.
Extreme (yet useful) examples:
test.py -vvb . "^testWriteClient$"
Builds the project silently, then runs unittest in verbose mode on all
tests whose names are precisely "testWriteClient". Useful when
debugging a specific test.
test.py -vvb . "!^testWriteClient$"
As before, but runs all tests whose names aren't precisely
"testWriteClient". Useful to avoid a specific failing test you don't
want to deal with just yet.
test.py -m . "!^testWriteClient$"
As before, but now opens up a minimized PyUnit GUI window (only showing
the progress bar). Useful for refactoring runs where you continually want
to make sure all tests still pass.
"""
import gc
import os
import re
import pdb
import sys
import threading # just to get at Thread objects created by tests
import time
import traceback
import unittest
import warnings
from distutils.util import get_platform
PLAT_SPEC = "%s-%s" % (get_platform(), sys.version[0:3])
class ImmediateTestResult(unittest._TextTestResult):
__super_init = unittest._TextTestResult.__init__
__super_startTest = unittest._TextTestResult.startTest
__super_printErrors = unittest._TextTestResult.printErrors
def __init__(self, stream, descriptions, verbosity, debug=0,
count=None, progress=0):
self.__super_init(stream, descriptions, verbosity)
self._debug = debug
self._progress = progress
self._progressWithNames = 0
self._count = count
self._testtimes = {}
# docstrings for tests don't override test-descriptions:
self.descriptions = False
if progress and verbosity == 1:
self.dots = 0
self._progressWithNames = 1
self._lastWidth = 0
self._maxWidth = 80
try:
import curses
except ImportError:
pass
else:
curses.setupterm()
self._maxWidth = curses.tigetnum('cols')
self._maxWidth -= len("xxxx/xxxx (xxx.x%): ") + 1
def stopTest(self, test):
self._testtimes[test] = time.time() - self._testtimes[test]
if gc.garbage:
print ("The following test left garbage:")
print (test)
print (gc.garbage)
# eat the garbage here, so that the garbage isn't
# printed for every subsequent test.
gc.garbage[:] = []
# Did the test leave any new threads behind?
new_threads = [t for t in threading.enumerate()
if (t.isAlive()
and
t not in self._threads)]
if new_threads:
print ("The following test left new threads behind:")
print (test)
print ("New thread(s):", new_threads)
def print_times(self, stream, count=None):
        results = sorted(self._testtimes.items(),
                         key=lambda item: item[1], reverse=True)
if count:
n = min(count, len(results))
if n:
print >>stream, "Top %d longest tests:" % n
else:
n = len(results)
if not n:
return
for i in range(n):
print >>stream, "%6dms" % int(results[i][1] * 1000), results[i][0]
def _print_traceback(self, msg, err, test, errlist):
if self.showAll or self.dots or self._progress:
self.stream.writeln("\n")
self._lastWidth = 0
tb = "".join(traceback.format_exception(*err))
self.stream.writeln(msg)
self.stream.writeln(tb)
errlist.append((test, tb))
def startTest(self, test):
if self._progress:
self.stream.write("\r%4d" % (self.testsRun + 1))
if self._count:
self.stream.write("/%d (%5.1f%%)" % (self._count,
(self.testsRun + 1) * 100.0 / self._count))
if self.showAll:
self.stream.write(": ")
elif self._progressWithNames:
# XXX will break with multibyte strings
name = self.getShortDescription(test)
width = len(name)
if width < self._lastWidth:
name += " " * (self._lastWidth - width)
self.stream.write(": %s" % name)
self._lastWidth = width
self.stream.flush()
self._threads = threading.enumerate()
self.__super_startTest(test)
self._testtimes[test] = time.time()
def getShortDescription(self, test):
s = self.getDescription(test)
if len(s) > self._maxWidth:
pos = s.find(" (")
if pos >= 0:
w = self._maxWidth - (pos + 5)
if w < 1:
# first portion (test method name) is too long
s = s[:self._maxWidth-3] + "..."
else:
pre = s[:pos+2]
post = s[-w:]
s = "%s...%s" % (pre, post)
return s[:self._maxWidth]
def addError(self, test, err):
if self._progress:
self.stream.write("\r")
if self._debug:
            raise err[1].with_traceback(err[2])
self._print_traceback("Error in test %s" % test, err,
test, self.errors)
def addFailure(self, test, err):
if self._progress:
self.stream.write("\r")
if self._debug:
            raise err[1].with_traceback(err[2])
self._print_traceback("Failure in test %s" % test, err,
test, self.failures)
def printErrors(self):
if self._progress and not (self.dots or self.showAll):
self.stream.writeln()
self.__super_printErrors()
def printErrorList(self, flavor, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavor, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln(err)
class ImmediateTestRunner(unittest.TextTestRunner):
__super_init = unittest.TextTestRunner.__init__
def __init__(self, **kwarg):
debug = kwarg.get("debug")
if debug is not None:
del kwarg["debug"]
progress = kwarg.get("progress")
if progress is not None:
del kwarg["progress"]
self.__super_init(**kwarg)
self._debug = debug
self._progress = progress
def _makeResult(self):
return ImmediateTestResult(self.stream, self.descriptions,
self.verbosity, debug=self._debug,
count=self._count, progress=self._progress)
def run(self, test):
self._count = test.countTestCases()
return unittest.TextTestRunner.run(self, test)
# setup list of directories to put on the path
class PathInit:
def __init__(self, build, build_inplace, libdir=None):
self.inplace = None
# Figure out if we should test in-place or test in-build. If the -b
# or -B option was given, test in the place we were told to build in.
# Otherwise, we'll look for a build directory and if we find one,
# we'll test there, otherwise we'll test in-place.
if build:
self.inplace = build_inplace
if self.inplace is None:
# Need to figure it out
if os.path.isdir(os.path.join("build", "lib.%s" % PLAT_SPEC)):
self.inplace = 0
else:
self.inplace = 1
# Calculate which directories we're going to add to sys.path, and cd
# to the appropriate working directory
org_cwd = os.getcwd()
if self.inplace:
self.libdir = "src"
else:
self.libdir = "lib.%s" % PLAT_SPEC
os.chdir("build")
# Hack sys.path
self.cwd = os.getcwd()
sys.path.insert(0, os.path.join(self.cwd, self.libdir))
# Hack again for external products.
global functional
kind = functional and "functional" or "unit"
if libdir:
extra = os.path.join(org_cwd, libdir)
print ("Running %s tests from %s" % (kind, extra))
self.libdir = extra
sys.path.insert(0, extra)
else:
print ("Running %s tests from %s" % (kind, self.cwd))
# Make sure functional tests find ftesting.zcml
if functional:
config_file = 'ftesting.zcml'
if not self.inplace:
# We chdired into build, so ftesting.zcml is in the
# parent directory
config_file = os.path.join('..', 'ftesting.zcml')
print ("Parsing %s" % config_file)
from zope.testing.functional import FunctionalTestSetup
FunctionalTestSetup(config_file)
def match(rx, s):
if not rx:
return 1
if rx[0] == "!":
return re.search(rx[1:], s) is None
else:
return re.search(rx, s) is not None
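# Illustrative behavior of match() above (added note, not part of the original
# harness): a plain pattern is applied with re.search, a leading "!" negates it.
#   match("bc", "abcd")    # truthy - "bc" occurs in "abcd"
#   match("!bc", "abcd")   # False  - negated pattern
#   match("", "anything")  # 1      - an empty filter matches everything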
class TestFileFinder:
def __init__(self, prefix):
self.files = []
self._plen = len(prefix)
if not prefix.endswith(os.sep):
self._plen += 1
global functional
if functional:
self.dirname = "ftest"
else:
self.dirname = "test"
def visit(self, rx, dir, files):
if os.path.split(dir)[1] != self.dirname:
return
# ignore tests that aren't in packages
if not "__init__.py" in files:
if not files or files == ["CVS"]:
return
print ("not a package", dir)
return
# Put matching files in matches. If matches is non-empty,
# then make sure that the package is importable.
matches = []
for file in files:
if file.startswith('test') and os.path.splitext(file)[-1] == '.py':
path = os.path.join(dir, file)
if match(rx, path):
matches.append(path)
# ignore tests when the package can't be imported, possibly due to
# dependency failures.
pkg = dir[self._plen:].replace(os.sep, '.')
try:
__import__(pkg)
# We specifically do not want to catch ImportError since that's useful
# information to know when running the tests.
except RuntimeError as e:
if VERBOSE:
print ("skipping %s because: %s" % (pkg, e))
return
else:
self.files.extend(matches)
def module_from_path(self, path):
"""Return the Python package name indicated by the filesystem path."""
assert path.endswith(".py")
path = path[self._plen:-3]
mod = path.replace(os.sep, ".")
return mod
def walk_with_symlinks(top, func, arg):
"""Like os.walk, but follows symlinks on POSIX systems.
    This could theoretically result in an infinite loop, if you create symlink
cycles in your Zope sandbox, so don't do that.
"""
try:
# Prevent 'hidden' files (those starting with '.') from being considered.
names = [f for f in os.listdir(top) if not f.startswith('.')]
except os.error:
return
func(arg, top, names)
exceptions = ('.', '..')
for name in names:
if name not in exceptions:
name = os.path.join(top, name)
if os.path.isdir(name):
walk_with_symlinks(name, func, arg)
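# Minimal usage sketch for walk_with_symlinks() (added note; the visitor name
# `_collect` is hypothetical - find_tests() below drives it the same way, the
# callback receives (arg, dirname, names)):
#   collected = []
#   def _collect(arg, dirname, names):
#       collected.extend(os.path.join(dirname, n) for n in names if n.endswith(".py"))
#   walk_with_symlinks("src", _collect, None)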
def check_test_dir():
global test_dir
if test_dir and not os.path.exists(test_dir):
d = pathinit.libdir
d = os.path.join(d, test_dir)
if os.path.exists(d):
if not os.path.isdir(d):
raise ValueError(
"%s does not exist and %s is not a directory"
% (test_dir, d)
)
test_dir = d
else:
raise ValueError("%s does not exist!" % test_dir)
def find_tests(rx):
global finder
finder = TestFileFinder(pathinit.libdir)
check_test_dir()
walkdir = test_dir or pathinit.libdir
walk_with_symlinks(walkdir, finder.visit, rx)
return finder.files
def package_import(modname):
mod = __import__(modname)
for part in modname.split(".")[1:]:
mod = getattr(mod, part)
return mod
def get_suite(file):
modname = finder.module_from_path(file)
try:
mod = package_import(modname)
except ImportError as err:
# print traceback
print ("Error importing %s\n%s" % (modname, err))
traceback.print_exc()
if debug:
raise
return None
try:
suite_func = mod.test_suite
except AttributeError:
print ("No test_suite() in %s" % file)
return None
return suite_func()
def filter_testcases(s, rx):
new = unittest.TestSuite()
for test in s._tests:
# See if the levels match
dolevel = (level == 0) or level >= getattr(test, "level", 0)
if not dolevel:
continue
if isinstance(test, unittest.TestCase):
name = test.id() # Full test name: package.module.class.method
name = name[1 + name.rfind("."):] # extract method name
if not rx or match(rx, name):
new.addTest(test)
else:
filtered = filter_testcases(test, rx)
if filtered:
new.addTest(filtered)
return new
def gui_runner(files, test_filter):
if build_inplace:
utildir = os.path.join(os.getcwd(), "utilities")
else:
utildir = os.path.join(os.getcwd(), "..", "utilities")
sys.path.append(utildir)
import unittestgui
suites = []
for file in files:
suites.append(finder.module_from_path(file) + ".test_suite")
suites = ", ".join(suites)
minimal = (GUI == "minimal")
# unittestgui apparently doesn't take the minimal flag anymore
unittestgui.main(suites)
class TrackRefs:
"""Object to track reference counts across test runs."""
def __init__(self):
self.type2count = {}
self.type2all = {}
def update(self):
obs = sys.getobjects(0)
type2count = {}
type2all = {}
for o in obs:
all = sys.getrefcount(o)
t = type(o)
if t in type2count:
type2count[t] += 1
type2all[t] += all
else:
type2count[t] = 1
type2all[t] = all
ct = [(type2count[t] - self.type2count.get(t, 0),
type2all[t] - self.type2all.get(t, 0),
t)
              for t in type2count.keys()]
        # sort by the count delta, then the refcount delta; avoid comparing the
        # type objects themselves (they are not orderable on Python 3)
        ct.sort(key=lambda item: (item[0], item[1]), reverse=True)
for delta1, delta2, t in ct:
if delta1 or delta2:
print ("%-55s %8d %8d" % (t, delta1, delta2))
self.type2count = type2count
self.type2all = type2all
def runner(files, test_filter, debug):
runner = ImmediateTestRunner(verbosity=VERBOSE, debug=debug,
progress=progress)
suite = unittest.TestSuite()
for file in files:
s = get_suite(file)
# See if the levels match
dolevel = (level == 0) or level >= getattr(s, "level", 0)
if s is not None and dolevel:
s = filter_testcases(s, test_filter)
suite.addTest(s)
try:
r = runner.run(suite)
if timesfn:
r.print_times(open(timesfn, "w"))
if VERBOSE:
print ("Wrote timing data to", timesfn)
if timetests:
r.print_times(sys.stdout, timetests)
except:
if debugger:
print ("%s:" % (sys.exc_info()[0], ))
print (sys.exc_info()[1])
pdb.post_mortem(sys.exc_info()[2])
else:
raise
def remove_stale_bytecode(arg, dirname, names):
    names = list(map(os.path.normcase, names))
for name in names:
if name.endswith(".pyc") or name.endswith(".pyo"):
srcname = name[:-1]
if srcname not in names:
fullname = os.path.join(dirname, name)
print ("Removing stale bytecode file", fullname)
os.unlink(fullname)
def main(module_filter, test_filter, libdir):
if not keepStaleBytecode:
        for dirpath, dirnames, filenames in os.walk(os.curdir):
            remove_stale_bytecode(None, dirpath, filenames)
# Get the log.ini file from the current directory instead of possibly
# buried in the build directory. XXX This isn't perfect because if
# log.ini specifies a log file, it'll be relative to the build directory.
# Hmm...
logini = os.path.abspath("log.ini")
# Initialize the path and cwd
global pathinit
pathinit = PathInit(build, build_inplace, libdir)
# Initialize the logging module.
import logging.config
logging.basicConfig()
level = os.getenv("LOGGING")
if level:
level = int(level)
else:
level = logging.CRITICAL
logging.root.setLevel(level)
if os.path.exists(logini):
logging.config.fileConfig(logini)
files = find_tests(module_filter)
files.sort()
if GUI:
gui_runner(files, test_filter)
elif LOOP:
if REFCOUNT:
rc = sys.gettotalrefcount()
track = TrackRefs()
while 1:
runner(files, test_filter, debug)
gc.collect()
if gc.garbage:
print ("GARBAGE:", len(gc.garbage), gc.garbage)
return
if REFCOUNT:
prev = rc
rc = sys.gettotalrefcount()
print ("totalrefcount=%-8d change=%-6d" % (rc, rc - prev))
track.update()
else:
runner(files, test_filter, debug)
def process_args(argv=None):
import getopt
global module_filter
global test_filter
global VERBOSE
global LOOP
global GUI
global TRACE
global REFCOUNT
global debug
global debugger
global build
global level
global libdir
global timesfn
global timetests
global progress
global build_inplace
global keepStaleBytecode
global functional
global test_dir
if argv is None:
argv = sys.argv
module_filter = None
test_filter = None
VERBOSE = 2
LOOP = 0
GUI = 0
TRACE = 0
REFCOUNT = 0
debug = 0 # Don't collect test results; simply let tests crash
debugger = 0
build = 0
build_inplace = 0
gcthresh = None
gcdebug = 0
gcflags = []
level = 1
libdir = '.'
progress = 0
timesfn = None
timetests = 0
keepStaleBytecode = 0
functional = 0
test_dir = None
try:
opts, args = getopt.getopt(argv[1:], "a:bBcdDfg:G:hLmprtTuv",
["all", "help", "libdir=", "times=",
"keepbytecode", "dir=", "build"])
except getopt.error as msg:
print (msg)
print ("Try `python %s -h' for more information." % argv[0])
sys.exit(2)
for k, v in opts:
if k == "-a":
level = int(v)
elif k == "--all":
level = 0
os.environ["COMPLAIN_IF_TESTS_MISSED"]='1'
elif k in ("-b", "--build"):
build = 1
elif k == "-B":
build = build_inplace = 1
elif k == "-c":
# make sure you have a recent version of pychecker
if not os.environ.get("PYCHECKER"):
os.environ["PYCHECKER"] = "-q"
import pychecker.checker
elif k == "-d":
debug = 1
elif k == "-D":
debug = 1
debugger = 1
elif k == "-f":
functional = 1
elif k in ("-h", "--help"):
print (__doc__)
sys.exit(0)
elif k == "-g":
gcthresh = int(v)
elif k == "-G":
if not v.startswith("DEBUG_"):
print ("-G argument must be DEBUG_ flag, not", repr(v))
sys.exit(1)
gcflags.append(v)
elif k == '--keepbytecode':
keepStaleBytecode = 1
elif k == '--libdir':
libdir = v
elif k == "-L":
LOOP = 1
elif k == "-m":
GUI = "minimal"
elif k == "-p":
progress = 1
elif k == "-r":
if hasattr(sys, "gettotalrefcount"):
REFCOUNT = 1
else:
print ("-r ignored, because it needs a debug build of Python")
elif k == "-T":
TRACE = 1
elif k == "-t":
if not timetests:
timetests = 50
elif k == "-u":
GUI = 1
elif k == "-v":
VERBOSE += 1
elif k == "--times":
try:
timetests = int(v)
except ValueError:
# must be a filename to write
timesfn = v
elif k == '--dir':
test_dir = v
if gcthresh is not None:
if gcthresh == 0:
gc.disable()
print ("gc disabled")
else:
gc.set_threshold(gcthresh)
print ("gc threshold:", gc.get_threshold())
if gcflags:
val = 0
for flag in gcflags:
v = getattr(gc, flag, None)
if v is None:
print ("Unknown gc flag", repr(flag))
print (gc.set_debug.__doc__)
sys.exit(1)
val |= v
gcdebug |= v
if gcdebug:
gc.set_debug(gcdebug)
if build:
# Python 2.3 is more sane in its non -q output
if sys.hexversion >= 0x02030000:
qflag = ""
else:
qflag = "-q"
cmd = sys.executable + " setup.py " + qflag + " build"
if build_inplace:
cmd += "_ext -i"
if VERBOSE:
print (cmd)
sts = os.system(cmd)
if sts:
print ("Build failed", hex(sts))
sys.exit(1)
if VERBOSE:
kind = functional and "functional" or "unit"
if level == 0:
print ("Running %s tests at all levels" % kind)
else:
print ("Running %s tests at level %d" % (kind, level))
# XXX We want to change *visible* warnings into errors. The next
# line changes all warnings into errors, including warnings we
# normally never see. In particular, test_datetime does some
# short-integer arithmetic that overflows to long ints, and, by
# default, Python doesn't display the overflow warning that can
# be enabled when this happens. The next line turns that into an
    # error instead. Guido suggests that a better way to get what we're
# after is to replace warnings.showwarning() with our own thing
# that raises an error.
## warnings.filterwarnings("error")
warnings.filterwarnings("ignore", module="logging")
if args:
if len(args) > 1:
test_filter = args[1]
module_filter = args[0]
try:
if TRACE:
# if the trace module is used, then we don't exit with
# status if on a false return value from main.
coverdir = os.path.join(os.getcwd(), "coverage")
import trace
ignoremods = ["os", "posixpath", "stat"]
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix],
ignoremods=ignoremods,
trace=0, count=1)
tracer.runctx("main(module_filter, test_filter, libdir)",
globals=globals(), locals=vars())
r = tracer.results()
path = "/tmp/trace.%s" % os.getpid()
import cPickle
f = open(path, "wb")
cPickle.dump(r, f)
f.close()
print (path)
r.write_results(show_missing=1, summary=1, coverdir=coverdir)
else:
bad = main(module_filter, test_filter, libdir)
if bad:
sys.exit(1)
except ImportError as err:
print (err)
print (sys.path)
raise
if __name__ == "__main__":
process_args()
|
medularis/py-star
|
run_tests.py
|
Python
|
bsd-3-clause
| 29,564
|
[
"VisIt"
] |
d361bb18723867b6e0b0a8bb5ff252056c6ce33e17c006cc7831fac7028e767f
|
from __future__ import absolute_import
from unittest import TestCase, skip
from ccdproc import CCDData
from astropy.convolution import convolve, Gaussian1DKernel, Box1DKernel
from astropy.io import fits
from astropy.modeling import Model
from astropy.modeling import (models,
fitting)
import astropy.units as u
import collections
import mock
import numpy as np
import os
import pandas
import random
import re
import shutil
import logging
logging.disable(logging.CRITICAL)
# import all classes in core.py
from ..core import (GenerateDcrParFile,
NightDataContainer,
NoMatchFound,
NotEnoughLinesDetected,
NoTargetException,
ReferenceData,
SaturationValues,
SpectroscopicMode)
# import of functions in core.py
from ..core import (astroscrappy_lacosmic,
add_linear_wavelength_solution,
add_wcs_keys,
bias_subtract,
bin_reference_data,
call_cosmic_rejection,
classify_spectroscopic_data,
combine_data,
convert_time,
create_master_bias,
create_master_flats,
cross_correlation,
dcr_cosmicray_rejection,
define_trim_section,
extraction,
extract_fractional_pixel,
extract_optimal,
evaluate_wavelength_solution,
fix_keywords,
fractional_sum,
get_best_flat,
get_central_wavelength,
get_lines_in_lamp,
get_overscan_region,
get_spectral_characteristics,
get_slit_trim_section,
get_twilight_time,
identify_targets,
image_overscan,
image_trim,
interpolate,
is_file_saturated,
linearize_spectrum,
name_master_flats,
normalize_master_flat,
ra_dec_to_deg,
read_fits,
record_trace_information,
save_extracted,
search_comp_group,
setup_logging,
trace,
trace_targets,
validate_ccd_region,
write_fits)
def fake_subprocess_popen(*args, stdout, stderr):
raise OSError
def fake_dcr_communicate_stderr_no_dcr():
return b'some message', b'dcr: not found'
class AddLinearWavelengthSolutionTest(TestCase):
def setUp(self):
self.ccd = CCDData(data=np.random.random_sample(200),
meta=fits.Header(),
unit='adu')
self.ccd = add_wcs_keys(ccd=self.ccd)
self.ccd.header.set('SLIT',
value='1.0_LONG_SLIT',
comment="slit [arcsec]")
def test_add_wavelength_solution(self):
calibration_lamp = 'non-existent.fits'
crval1 = 3977.948
npix = 4060
cdelt = 0.9910068
x_axis = np.linspace(crval1,
crval1 + cdelt * npix,
npix)
self.ccd = add_linear_wavelength_solution(
ccd=self.ccd,
x_axis=x_axis,
reference_lamp=calibration_lamp)
self.assertEqual(self.ccd.header['CTYPE1'], 'LINEAR')
self.assertEqual(self.ccd.header['CRVAL1'], crval1)
self.assertEqual(self.ccd.header['CRPIX1'], 1)
self.assertAlmostEqual(self.ccd.header['CDELT1'], cdelt, places=3)
self.assertEqual(self.ccd.header['DCLOG1'],
'REFSPEC1 = {:s}'.format(calibration_lamp))
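        # Added note: the header keywords checked above encode the standard FITS
        # linear dispersion relation, wavelength(i) = CRVAL1 + CDELT1 * (i - CRPIX1)
        # for 1-indexed pixel i, which matches how x_axis was constructed in this test.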
class AddWCSKeywordsTest(TestCase):
def setUp(self):
self.test_ccd = CCDData(data=np.ones((100, 100)),
meta=fits.Header(),
unit='adu')
def test_add_wcs_keys(self):
wcs_keys = ['BANDID1',
'APNUM1',
'WCSDIM',
'CTYPE1',
'CRVAL1',
'CRPIX1',
'CDELT1',
'CD1_1',
'LTM1_1',
'WAT0_001',
'WAT1_001',
'DC-FLAG',
'DCLOG1']
self.test_ccd = add_wcs_keys(ccd=self.test_ccd)
for key in wcs_keys:
self.assertIn(key, self.test_ccd.header)
@skip
def test_add_wcs_keys_error(self):
wcs_keys = ['BANDID1',
'APNUM1',
'WCSDIM',
'CTYPE1',
'CRVAL1',
'CRPIX1',
'CDELT1',
'CD1_1',
'LTM1_1',
'WAT0_001',
'WAT1_001',
'DC-FLAG',
'DCLOG1']
class BiasSubtractTest(TestCase):
def setUp(self):
self.ccd = CCDData(data=np.ones((100, 100)) * 100,
meta=fits.Header(),
unit='adu')
self.master_bias = CCDData(data=np.ones((100, 100)) * 50,
meta=fits.Header(),
unit='adu')
self.master_bias_name = os.path.join(os.getcwd(),
'master_bias_file.fits')
def test_bias_subtract(self):
ccd = bias_subtract(ccd=self.ccd,
master_bias=self.master_bias,
master_bias_name=self.master_bias_name)
np.testing.assert_array_equal(ccd.data, np.ones((100, 100)) * 50.)
self.assertEqual(ccd.header['GSP_BIAS'],
os.path.basename(self.master_bias_name))
class BinningTest(TestCase):
def test__bin_reference_data(self):
wavelength = np.linspace(3000, 7000, 4000)
intensity = np.random.random_sample(4000)
for i in range(1, 4):
new_wavelength, new_intensity = bin_reference_data(
wavelength=wavelength,
intensity=intensity,
serial_binning=i)
self.assertEqual(len(wavelength), len(intensity))
self.assertEqual(len(new_wavelength), len(new_intensity))
self.assertEqual(len(new_wavelength), np.floor(len(wavelength) / i))
class CentralWavelength(TestCase):
def setUp(self):
# 400m2
self.grating = '400'
self.grating_angle = 7.5
self.camera_angle = 16.1
self.reference_central_wavelength = 7001.54 * u.angstrom
def test_get_central_wavelength(self):
central_wavelength = get_central_wavelength(grating=self.grating,
grt_ang=self.grating_angle,
cam_ang=self.camera_angle)
self.assertAlmostEqual(central_wavelength.value,
self.reference_central_wavelength.value,
places=2)
class ClassifySpectroscopicData(TestCase):
def setUp(self):
self.path = os.path.join(
os.getcwd(),
            'goodman_pipeline/data/test_data/test_classify_spectroscopic')
if not os.path.isdir(self.path):
os.mkdir(self.path)
def tearDown(self):
if os.path.isdir(self.path):
shutil.rmtree(self.path)
def create_fake_spectroscopic_data(self):
if os.path.isdir(self.path):
card_values = [
{'obstype': 'OBJECT', 'object': 'NGC2070', 'obsra': '16:23:24.285', 'obsdec': '-39:12:53.954'},
{'obstype': 'OBJECT', 'object': 'NGC2070', 'obsra': '16:24:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'OBJECT', 'object': 'NGC2070', 'obsra': '16:24:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'OBJECT', 'object': 'NGC2070', 'obsra': '16:24:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'OBJECT', 'object': 'NGC2070', 'obsra': '16:25:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'OBJECT', 'object': 'NGC2070', 'obsra': '16:25:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'OBJECT', 'object': 'NGC2070', 'obsra': '16:25:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'OBJECT', 'object': 'NGC2070', 'obsra': '16:26:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'OBJECT', 'object': 'NGC2070', 'obsra': '16:27:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'OBJECT', 'object': 'NGC2070', 'obsra': '16:28:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'OBJECT', 'object': 'NGC2070', 'obsra': '16:23:34.285', 'obsdec': ' 39:13:53.954'},
{'obstype': 'OBJECT', 'object': 'NGC2070', 'obsra': '16:23:34.285', 'obsdec': ' 39:13:53.954'},
{'obstype': 'OBJECT', 'object': 'NGC2070', 'obsra': '16:23:34.285', 'obsdec': ' 39:13:53.954'},
{'obstype': 'OBJECT', 'object': 'NGC2070', 'obsra': '16:23:34.285', 'obsdec': ' 39:13:53.954'},
{'obstype': 'OBJECT', 'object': 'NGC2070', 'obsra': '16:23:34.285', 'obsdec': ' 39:13:53.954'},
{'obstype': 'COMP', 'object': 'HgArNe', 'obsra': '16:22:34.285', 'obsdec': '-39:23:53.954'},
{'obstype': 'COMP', 'object': 'HgArNe', 'obsra': '16:24:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'COMP', 'object': 'HgArNe', 'obsra': '16:24:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'COMP', 'object': 'HgArNe', 'obsra': '16:24:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'COMP', 'object': 'HgArNe', 'obsra': '16:25:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'COMP', 'object': 'HgArNe', 'obsra': '16:25:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'COMP', 'object': 'HgArNe', 'obsra': '16:25:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'COMP', 'object': 'HgArNe', 'obsra': '16:26:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'COMP', 'object': 'HgArNe', 'obsra': '16:27:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'COMP', 'object': 'HgArNe', 'obsra': '16:28:34.285', 'obsdec': '-39:13:53.954'},
{'obstype': 'COMP', 'object': 'HgArNe', 'obsra': '16:23:34.285', 'obsdec': '39:13:53.954'},
{'obstype': 'COMP', 'object': 'HgArNe', 'obsra': '16:23:34.285', 'obsdec': '39:13:53.954'},
{'obstype': 'COMP', 'object': 'HgArNe', 'obsra': '16:23:34.285', 'obsdec': '39:13:53.954'},
{'obstype': 'COMP', 'object': 'HgArNe', 'obsra': '16:23:34.285', 'obsdec': '39:13:53.954'},
{'obstype': 'COMP', 'object': 'HgArNe', 'obsra': '16:23:34.285', 'obsdec': '39:13:53.954'}]
for i in range(len(card_values)):
ccd = CCDData(data=np.ones((3, 3)),
meta=fits.Header(),
unit='adu')
ccd.header.set('DATE', value='2019-03-22', comment='nc')
ccd.header.set('SLIT', value='1.0" long slit', comment='nc')
ccd.header.set('DATE-OBS', value='2019-03-22T09:59:33.654', comment='nc')
ccd.header.set('OBSTYPE', value=card_values[i]['obstype'], comment='nc')
ccd.header.set('OBJECT', value=card_values[i]['object'], comment='nc')
ccd.header.set('EXPTIME', value='10', comment='nc')
ccd.header.set('OBSRA', value=card_values[i]['obsra'], comment='nc')
ccd.header.set('OBSDEC', value=card_values[i]['obsdec'], comment='nc')
ccd.header.set('GRATING', value='SYZY_400', comment='nc')
ccd.header.set('CAM_TARG', value='16.1', comment='nc')
ccd.header.set('GRT_TARG', value='7.5', comment='nc')
ccd.header.set('FILTER', value='<NO FILTER>', comment='nc')
ccd.header.set('FILTER2', value='GG455', comment='nc')
ccd.header.set('GAIN', value='1.48', comment='nc')
ccd.header.set('RDNOISE', value='3.89', comment='nc')
ccd.write(os.path.join(self.path, 'test_file_{:03d}.fits'.format(i)))
def test_classify_spectroscopic_data__no_data(self):
self.assertRaises(SystemExit, classify_spectroscopic_data, self.path, '*fits')
def test_classify_spectroscopic_data(self):
self.create_fake_spectroscopic_data()
result = classify_spectroscopic_data(path=self.path, search_pattern='test_file')
self.assertIsInstance(result, NightDataContainer)
self.assertFalse(result.is_empty)
class CombineDataTest(TestCase):
def setUp(self):
self.ccd1 = CCDData(data=np.ones((100, 100)),
meta=fits.Header(),
unit='adu')
self.ccd1.header.set('OBJECT', value='TestObject')
self.ccd1.header.set('GRATING', value='Grating')
self.ccd1.header.set('SLIT', value='1.05SlitSize')
self.ccd2 = self.ccd1.copy()
self.ccd2.data *= 2
self.ccd3 = self.ccd1.copy()
self.ccd3.data *= 5
self.ccd1.header.set('GSP_FNAM', value='prefix_0001_image.fits')
self.ccd2.header.set('GSP_FNAM', value='prefix_0002_image.fits')
self.ccd3.header.set('GSP_FNAM', value='prefix_0003_image.fits')
self.image_list = [self.ccd1, self.ccd2, self.ccd3]
self.dest_path = os.getcwd()
self.prefix = 'testing_'
self.output_name = 'combine_data.fits'
def tearDown(self):
using_output_name_file_name = os.path.join(
self.dest_path,
self.output_name)
if os.path.isfile(using_output_name_file_name):
os.unlink(using_output_name_file_name)
not_using_output_name_file_name_list = os.listdir(self.dest_path)
if not_using_output_name_file_name_list:
for _file in not_using_output_name_file_name_list:
if '{:s}comb_'.format(self.prefix) in _file:
os.unlink(_file)
def test_combine_data_median_prefix_ignored(self):
combined = combine_data(
image_list=self.image_list,
dest_path=self.dest_path,
prefix=self.prefix,
output_name=self.output_name,
method='median',
save=True)
np.testing.assert_array_equal(combined.data, np.ones((100, 100)) * 1.5)
self.assertEqual(len(combined.header['GSP_IC*']), 3)
self.assertTrue(self.prefix not in combined.header['GSP_FNAM'])
def test_combine_data_median_prefix_used(self):
combined = combine_data(
image_list=self.image_list,
dest_path=self.dest_path,
prefix=self.prefix,
method='median',
save=True)
np.testing.assert_array_equal(combined.data, np.ones((100, 100)) * 1.5)
self.assertEqual(len(combined.header['GSP_IC*']), 3)
self.assertTrue(self.prefix in combined.header['GSP_FNAM'])
class CosmicRayRejectionTest(TestCase):
def setUp(self):
self.ccd = CCDData(data=np.ones((100, 100)),
meta=fits.Header(),
unit='adu')
self.file_name = 'cr_test.fits'
self.ccd.header.set('CCDSUM', value='1 1')
self.ccd.header.set('OBSTYPE', value='OBJECT')
self.ccd.header.set('INSTCONF', value='Red')
self.ccd.header.set('GSP_FNAM', value=self.file_name)
self.ccd.header.set('GSP_COSM', value='none')
self.red_path = os.getcwd()
self.out_prefix = 'prefix'
@skip
def test_dcr_cosmicray_rejection(self):
pass
def test_call_cosmic_rejection_default_1x1(self):
prefix = 'new_'
initial_value = self.ccd.data[50, 50]
self.ccd.data[50, 50] = 50000
ccd, out_prefix = call_cosmic_rejection(ccd=self.ccd,
image_name=self.file_name,
out_prefix=self.out_prefix,
red_path=self.red_path,
keep_files=True,
prefix=prefix,
method='default',
save=False)
self.assertAlmostEqual(initial_value, ccd.data[50, 50])
self.assertEqual(out_prefix, prefix + self.out_prefix)
self.assertEqual(ccd.header['GSP_FNAM'],
prefix + self.out_prefix + self.file_name)
self.assertEqual(ccd.header['GSP_COSM'], 'DCR')
self.assertTrue(os.path.isfile('dcr.par'))
@mock.patch('subprocess.Popen', side_effect=fake_subprocess_popen)
def test_call_cosmic_rejection_default_1x1_no_dcr_par(self,
subprocess_Popen_function):
self.assertRaises(SystemExit,
call_cosmic_rejection,
self.ccd,
self.file_name,
self.out_prefix,
self.red_path,
os.getcwd())
@mock.patch('subprocess.Popen.communicate',
side_effect=fake_dcr_communicate_stderr_no_dcr)
def test_dcr_cosmicray_rejection_no_dcr_executable(
self, subprocess_Popen_communicate):
self.assertRaises(SystemExit,
dcr_cosmicray_rejection,
self.red_path,
self.file_name,
'c',
os.getcwd())
def test_call_cosmic_rejection_default_2x2(self):
self.ccd.header.set('CCDSUM', value='2 2')
prefix = 'new_'
initial_value = self.ccd.data[50, 50]
self.ccd.data[50, 50] = 50000
ccd, out_prefix = call_cosmic_rejection(ccd=self.ccd,
image_name=self.file_name,
out_prefix=self.out_prefix,
red_path=self.red_path,
keep_files=True,
prefix=prefix,
method='default',
save=True)
self.assertAlmostEqual(initial_value, ccd.data[50, 50])
self.assertEqual(out_prefix, prefix + self.out_prefix)
self.assertEqual(ccd.header['GSP_FNAM'],
prefix + self.out_prefix + self.file_name)
self.assertEqual(ccd.header['GSP_COSM'], 'LACosmic')
self.assertTrue(os.path.isfile('new_prefixcr_test.fits'))
def test_call_cosmic_rejection_default_3x3(self):
self.ccd.header.set('CCDSUM', value='3 3')
prefix = 'new_'
initial_value = self.ccd.data[50, 50]
self.ccd.data[50, 50] = 50000
ccd, out_prefix = call_cosmic_rejection(ccd=self.ccd,
image_name=self.file_name,
out_prefix=self.out_prefix,
red_path=self.red_path,
keep_files=True,
prefix=prefix,
method='default',
save=True)
self.assertAlmostEqual(initial_value, ccd.data[50, 50])
self.assertEqual(out_prefix, prefix + self.out_prefix)
self.assertEqual(ccd.header['GSP_FNAM'],
prefix + self.out_prefix + self.file_name)
self.assertEqual(ccd.header['GSP_COSM'], 'LACosmic')
self.assertTrue(os.path.isfile('new_prefixcr_test.fits'))
def test_call_cosmic_rejection_none(self):
prefix = 'new_'
ccd, out_prefix = call_cosmic_rejection(ccd=self.ccd,
image_name=self.file_name,
out_prefix=self.out_prefix,
red_path=self.red_path,
keep_files=True,
prefix=prefix,
method='none',
save=True)
self.assertEqual(out_prefix, self.out_prefix)
self.assertEqual(ccd.header['GSP_FNAM'],
self.out_prefix + self.file_name)
self.assertEqual(ccd.header['GSP_COSM'], 'none')
self.assertTrue(os.path.isfile('prefixcr_test.fits'))
def test_call_cosmic_rejection_comp_lamp(self):
self.ccd.header.set('OBSTYPE', value='COMP')
prefix = 'new_'
ccd, out_prefix = call_cosmic_rejection(ccd=self.ccd,
image_name=self.file_name,
out_prefix=self.out_prefix,
red_path=self.red_path,
keep_files=True,
prefix=prefix,
method='lacosmic',
save=True)
self.assertEqual(out_prefix, prefix + self.out_prefix)
self.assertEqual(ccd.header['GSP_FNAM'],
prefix + self.out_prefix + self.file_name)
self.assertEqual(ccd.header['GSP_COSM'], 'none')
def test_call_cosmic_rejection_not_implemented_error(self):
prefix = 'new_'
self.assertRaises(NotImplementedError,
call_cosmic_rejection,
self.ccd,
self.file_name,
self.out_prefix,
self.red_path,
True,
prefix,
'not_implemented_method',
True)
def tearDown(self):
files_to_delete = ['dcr.par',
'goodman_log.txt',
'cosmic_test.fits',
'new_prefixcr_test.fits',
'prefixcr_test.fits',
'crmask_cr_test.fits']
for _file in files_to_delete:
if os.path.isfile(_file):
os.unlink(_file)
class CreateMasterBias(TestCase):
def setUp(self):
self.name = ''
self.bias_files = ['bias_{}.fits'.format(i) for i in range(1, 12)]
self.raw_data = os.getcwd()
self.reduced_data = os.getcwd()
self.overscan_region = '[1:10,1:100]'
self.trim_section = '[11:90,11:90]'
self.ccd = CCDData(data=np.ones((100, 100)),
meta=fits.Header(),
unit='adu')
self.ccd.data[11:90, 10:90] += 1
self.ccd.header.set('CCDSUM', value='1 1')
self.ccd.header.set('INSTCONF', value='Red')
self.ccd.header.set('RDNOISE', value=3.89)
self.ccd.header.set('GAIN', value=1.48)
for _file in self.bias_files:
self.ccd.write(os.path.join(self.reduced_data, _file))
def tearDown(self):
for _file in self.bias_files:
os.unlink(os.path.join(self.reduced_data, _file))
if self.name != '':
for _file in [self.name]:
os.unlink(_file)
def test_create_master_bias(self):
master, self.name = create_master_bias(
bias_files=self.bias_files,
raw_data=self.raw_data,
reduced_data=self.reduced_data,
technique='Spectroscopy')
self.assertTrue('master_bias' in self.name)
self.assertEqual('master_bias_RED_SP_1x1_R03.89_G01.48.fits', self.name)
self.assertTrue(all(
[master.header[key] in self.bias_files for key in master.header['GSP_IC*'].keys()]))
class CreateMasterFlatsTest(TestCase):
def setUp(self):
self.flat_files = ['flat_{}.fits'.format(i) for i in range(1, 12)]
self.raw_data = os.getcwd()
self.reduced_data = os.getcwd()
self.master_flat_name = 'master_flat.fits'
self.saturation_level = 69257
self.overscan_region = '[1:10,1:100]'
self.trim_section = '[11:90,11:90]'
self.ccd = CCDData(data=np.ones((100, 100)),
meta=fits.Header(),
unit='adu')
self.ccd.data[11:90, 10:90] += 5
self.ccd.header.set('CCDSUM', value='1 1')
self.ccd.header.set('INSTCONF', value='Red')
self.ccd.header.set('GAIN', value=1.48)
self.ccd.header.set('RDNOISE', value=3.89)
for _file in self.flat_files:
self.ccd.write(os.path.join(self.reduced_data, _file))
self.bias = CCDData(data=np.ones((100, 100)),
meta=fits.Header(),
unit='adu')
self.bias.header.set('CCDSUM', value='1 1')
self.bias.write(os.path.join(self.reduced_data, 'master_bias.fits'))
def tearDown(self):
for _file in self.flat_files:
os.unlink(os.path.join(self.reduced_data, _file))
for _file in ['master_bias.fits', self.master_flat_name]:
if os.path.isfile(_file):
os.unlink(_file)
def test_create_master_flats_no_bias(self):
master, name = create_master_flats(
flat_files=self.flat_files,
raw_data=self.raw_data,
reduced_data=self.reduced_data,
technique='Spectroscopy',
overscan_region=self.overscan_region,
trim_section=self.trim_section,
master_bias_name='master_bias.fits',
new_master_flat_name=self.master_flat_name,
saturation_threshold=1,
ignore_bias=True)
self.assertEqual(self.master_flat_name, os.path.basename(name))
self.assertTrue(os.path.isfile(name))
self.assertEqual('none', master.header['GSP_BIAS'])
def test_create_master_flats_with_bias(self):
master, name = create_master_flats(
flat_files=self.flat_files,
raw_data=self.raw_data,
reduced_data=self.reduced_data,
technique='Spectroscopy',
overscan_region=self.overscan_region,
trim_section=self.trim_section,
master_bias_name=os.path.join(self.reduced_data,
'master_bias.fits'),
new_master_flat_name=os.path.join(self.reduced_data,
self.master_flat_name),
saturation_threshold=1,
ignore_bias=False)
self.assertEqual(self.master_flat_name, os.path.basename(name))
self.assertTrue(os.path.isfile(name))
self.assertEqual('master_bias.fits', master.header['GSP_BIAS'])
def test_create_master_flats_saturated_flats(self):
file_to_replace = os.path.join(self.reduced_data, self.flat_files[0])
os.unlink(file_to_replace)
self.assertFalse(os.path.isfile(file_to_replace))
self.ccd.data[11:90, 10:90] += 2 * self.saturation_level
self.ccd.header.set('OBJECT', value='saturated')
self.ccd.write(os.path.join(self.reduced_data, self.flat_files[0]),
overwrite=True)
master, name = create_master_flats(
flat_files=self.flat_files,
raw_data=self.raw_data,
reduced_data=self.reduced_data,
technique='Spectroscopy',
overscan_region=self.overscan_region,
trim_section=self.trim_section,
master_bias_name='master_bias.fits',
new_master_flat_name=self.master_flat_name,
saturation_threshold=1,
ignore_bias=False)
self.assertNotEqual(self.flat_files[0], master.header['GSP_IC01'])
def test_create_master_flats_empty_list(self):
master, name = create_master_flats(
flat_files=[],
raw_data=self.raw_data,
reduced_data=self.reduced_data,
technique='Spectroscopy',
overscan_region=self.overscan_region,
trim_section=self.trim_section,
master_bias_name='master_bias.fits',
new_master_flat_name=self.master_flat_name,
saturation_threshold=1,
ignore_bias=False)
self.assertIsNone(master)
self.assertIsNone(name)
class CrossCorrelationTest(TestCase):
def setUp(self):
self.binning = 1
self.size = 5000
self.reference_array = np.ones(self.size)
self.compared_array = np.ones(self.size)
self.x_axis = np.arange(0, self.size, 1)
self.gaussian = models.Gaussian1D(stddev=5, amplitude=3000)
self.gaussian.mean.value = int(self.size / 2.)
self.reference_array += self.gaussian(self.x_axis)
def test_cross_correlation_small_slit(self):
offset = 500
self.gaussian.mean.value -= offset
self.compared_array += self.gaussian(self.x_axis)
correlation_value = cross_correlation(reference=self.reference_array,
compared=self.compared_array,
slit_size=1,
serial_binning=1,
mode='full',
plot=False)
self.assertEqual(offset, correlation_value)
def test_cross_correlation_large_slit(self):
offset = 500
self.gaussian.mean.value -= offset
self.compared_array += self.gaussian(self.x_axis)
box_kernel = Box1DKernel(width=5 / 0.15)
self.compared_array = convolve(self.compared_array, box_kernel)
correlation_value = cross_correlation(reference=self.reference_array,
compared=self.compared_array,
slit_size=5,
serial_binning=1,
mode='full',
plot=False)
self.assertEqual(offset, correlation_value)
class DefineTrimSectionTest(TestCase):
def setUp(self):
self.ccd = CCDData(data=np.ones((100, 100)),
meta=fits.Header(),
unit='adu')
self.ccd.header.set('CCDSUM', value='1 1')
self.ccd.header.set('TRIMSEC', value='[10:10,10:10]')
self.full_path = os.path.join(os.getcwd(), 'testfile.fits')
self.ccd.write(self.full_path)
def test_define_trim_section_spectroscopy(self):
expected_trim_section = '[51:4110,2:100]'
trim_section = define_trim_section(sample_image=self.full_path,
technique='Spectroscopy')
self.assertEqual(expected_trim_section, trim_section)
def test_define_trim_section_spectroscopy_2x2(self):
self.ccd.header.set('CCDSUM', value='2 2')
self.ccd.write(self.full_path, overwrite=True)
expected_trim_section = '[26:2055,2:100]'
trim_section = define_trim_section(sample_image=self.full_path,
technique='Spectroscopy')
self.assertEqual(expected_trim_section, trim_section)
def test_define_trim_section_imaging(self):
expected_trim_section = '[10:10,10:10]'
trim_section = define_trim_section(sample_image=self.full_path,
technique='Imaging')
self.assertEqual(expected_trim_section, trim_section)
def tearDown(self):
if os.path.isfile(self.full_path):
os.unlink(self.full_path)
class EvaluateWavelengthSolutionTest(TestCase):
def test__evaluate_solution(self):
differences = np.array([0.5] * 10)
clipped_differences = np.ma.masked_array(differences,
mask=[0,
0,
1,
0,
0,
1,
0,
0,
1,
0])
rms_error, n_points, n_rej = evaluate_wavelength_solution(
clipped_differences=clipped_differences)
self.assertEqual(rms_error, 0.5)
self.assertEqual(n_points, 10)
self.assertEqual(n_rej, 3)
class ExtractionTest(TestCase):
def setUp(self):
self.fake_image = CCDData(data=np.ones((100, 100)),
meta=fits.Header(),
unit='adu')
self.fake_image.header.set('NAXIS', value=2)
self.fake_image.header.set('NAXIS1', value=100)
self.fake_image.header.set('NAXIS2', value=100)
self.fake_image.header.set('OBSTYPE', value='COMP')
self.fake_image.header['GSP_FNAM'] = 'fake-image.fits'
# Create model aligned with pixels - represents the trace
self.target_trace = models.Linear1D(slope=0, intercept=50.3)
# Calculate the STDDEV
self.stddev = 8.4
# Calculate how many STDDEV will be extracted - N_STDDEV
self.n_stddev = 2
        # Calculate how far the background is from the center.
self.distance = 1
self.target_profile_gaussian = models.Gaussian1D(amplitude=1,
mean=50.3,
stddev=self.stddev)
self.target_profile_moffat = models.Moffat1D(amplitude=1,
x_0=50.3,
gamma=self.stddev)
self.reference_result_gaussian = np.ones(
100) * self.target_profile_gaussian.fwhm * self.n_stddev
self.reference_result_moffat = np.ones(
100) * self.target_profile_moffat.fwhm * self.n_stddev
def test_fractional_extraction(self):
# Perform extraction
extracted_array, background, info = extract_fractional_pixel(
ccd=self.fake_image,
target_trace=self.target_trace,
target_fwhm=self.target_profile_gaussian.fwhm,
extraction_width=self.n_stddev,
background_spacing=self.distance)
# assert isinstance(fake_image, CCDData)
self.assertIsInstance(extracted_array, CCDData)
np.testing.assert_array_almost_equal(extracted_array,
self.reference_result_gaussian)
def test_fractional_extraction_obstype_object(self):
self.fake_image.header.set('OBSTYPE', value='OBJECT')
# Perform extraction
extracted_array, background, info = extract_fractional_pixel(
ccd=self.fake_image,
target_trace=self.target_trace,
target_fwhm=self.stddev,
extraction_width=self.n_stddev,
background_spacing=self.distance)
# assert isinstance(fake_image, CCDData)
self.assertIsInstance(extracted_array, CCDData)
np.testing.assert_array_almost_equal(extracted_array,
np.zeros(extracted_array.shape))
def test_fractional_sum(self):
fake_image = np.ones((100, 100))
low_limit = 50 + np.random.random()
high_limit = 60 + np.random.random()
frac_sum = fractional_sum(fake_image, 50, low_limit, high_limit)
self.assertEqual(frac_sum, high_limit - low_limit)
def test_extract_optimal(self):
self.assertRaises(NotImplementedError, extract_optimal)
def test_extract__optimal_not_implemented(self):
self.assertRaises(NotImplementedError,
extraction,
self.fake_image,
self.target_trace,
self.target_profile_gaussian,
'optimal')
def test_extraction_gaussian(self):
extracted = extraction(ccd=self.fake_image,
target_trace=self.target_trace,
spatial_profile=self.target_profile_gaussian,
extraction_name='fractional')
self.assertIsInstance(extracted, CCDData)
np.testing.assert_array_almost_equal(extracted, self.reference_result_gaussian)
def test_extraction_moffat(self):
spatial_profile_moffat = models.Moffat1D(amplitude=self.target_profile_gaussian.amplitude.value,
x_0=self.target_profile_gaussian.mean.value,
gamma=self.target_profile_gaussian.stddev.value)
extracted = extraction(ccd=self.fake_image,
target_trace=self.target_trace,
spatial_profile=spatial_profile_moffat,
extraction_name='fractional')
self.assertIsInstance(extracted, CCDData)
np.testing.assert_array_almost_equal(extracted, self.reference_result_moffat)
def test_extraction_not_implemented_model(self):
spatial_profile = models.BlackBody1D()
self.assertRaises(NotImplementedError, extraction, ccd=self.fake_image,
target_trace=self.target_trace,
spatial_profile=spatial_profile,
extraction_name='fractional')
def test_extraction_exception(self):
self.assertRaises(NotImplementedError, extraction, ccd=self.fake_image,
target_trace=self.target_trace,
spatial_profile=self.target_profile_gaussian,
extraction_name='optimal')
class FitsFileIOAndOps(TestCase):
def setUp(self):
self.fake_image = CCDData(data=np.ones((100, 100)),
meta=fits.Header(),
unit='adu')
self.file_name = 'sample_file.fits'
self.target_non_zero = 4
self.current_directory = os.getcwd()
self.full_path = os.path.join(self.current_directory, self.file_name)
self.parent_file = 'parent_file.fits'
self.fake_image.header.set('CCDSUM',
value='1 1',
comment='Fake values')
self.fake_image.header.set('OBSTYPE',
value='OBJECT',
comment='Fake values')
self.fake_image.header.set('GSP_FNAM',
value=self.file_name,
comment='Fake values')
self.fake_image.header.set('GSP_PNAM',
value=self.parent_file,
comment='Fake values')
self.fake_image.write(self.full_path, overwrite=False)
def test_write_fits(self):
self.assertTrue(os.path.isfile(self.full_path))
os.remove(self.full_path)
write_fits(ccd=self.fake_image,
full_path=self.full_path,
parent_file=self.parent_file,
overwrite=False)
self.assertTrue(os.path.isfile(self.full_path))
def test_write_fits_directory_does_not_exist(self):
os.unlink(self.full_path)
base_path = os.path.dirname(self.full_path)
self.full_path = os.path.join(base_path,
'testdir',
os.path.basename(self.full_path))
self.assertFalse(os.path.isdir(os.path.dirname(self.full_path)))
write_fits(ccd=self.fake_image,
full_path=self.full_path,
parent_file=self.parent_file,
overwrite=True)
self.assertTrue(os.path.isdir(os.path.dirname(self.full_path)))
def test_read_fits(self):
self.fake_image.header.remove('GSP_PNAM')
self.fake_image.write(self.full_path, overwrite=True)
self.recovered_fake_image = read_fits(self.full_path)
self.assertIsInstance(self.recovered_fake_image, CCDData)
def test_image_overscan(self):
data_value = 100.
overscan_value = 0.1
# alter overscan region to a lower number
self.fake_image.data *= data_value
self.fake_image.data[:, 0:5] = overscan_value
overscan_region = '[1:6,:]'
self.assertEqual(self.fake_image.data[:, 6:99].mean(), data_value)
self.assertEqual(self.fake_image.data[:, 0:5].mean(), overscan_value)
self.fake_image = image_overscan(ccd=self.fake_image,
overscan_region=overscan_region)
self.assertEqual(self.fake_image.data[:, 6:99].mean(),
data_value - overscan_value)
self.assertEqual(self.fake_image.header['GSP_OVER'], overscan_region)
def test_image_overscan_none(self):
new_fake_image = image_overscan(ccd=self.fake_image,
overscan_region=None)
self.assertEqual(new_fake_image, self.fake_image)
def test_image_trim(self):
self.assertEqual(self.fake_image.data.shape, (100, 100))
trim_section = '[1:50,:]'
self.fake_image = image_trim(ccd=self.fake_image,
trim_section=trim_section,
trim_type='trimsec')
self.assertEqual(self.fake_image.data.shape, (100, 50))
self.assertEqual(self.fake_image.header['GSP_TRIM'], trim_section)
def test_image_trim_invalid_type(self):
self.assertEqual(self.fake_image.data.shape, (100, 100))
trim_section = '[1:50,:]'
self.fake_image = image_trim(ccd=self.fake_image,
trim_section=trim_section,
trim_type='invalid_type')
self.assertEqual(self.fake_image.data.shape, (100, 50))
self.assertEqual(self.fake_image.header['GSP_TRIM'], trim_section)
def test_image_trim_trim_section_none(self):
self.assertEqual(self.fake_image.data.shape, (100, 100))
self.fake_image = image_trim(ccd=self.fake_image,
trim_section=None,
trim_type='trimsec')
self.assertEqual(self.fake_image.data.shape, (100, 100))
def test_save_extracted_target_zero(self):
self.fake_image.header.set('GSP_FNAM', value=self.file_name)
same_fake_image = save_extracted(ccd=self.fake_image,
destination=self.current_directory,
prefix='e',
target_number=0)
self.assertEqual(same_fake_image, self.fake_image)
self.assertTrue(os.path.isfile('e' + self.file_name))
def test_save_extracted_target_non_zero(self):
self.fake_image.header.set('GSP_FNAM', value=self.file_name)
same_fake_image = save_extracted(ccd=self.fake_image,
destination=self.current_directory,
prefix='e',
target_number=self.target_non_zero)
self.assertEqual(same_fake_image, self.fake_image)
self.assertTrue(os.path.isfile('e' + re.sub('.fits',
'_target_{:d}.fits'.format(
self.target_non_zero),
self.file_name)))
def test_save_extracted_target_zero_comp(self):
self.fake_image.header.set('GSP_FNAM', value=self.file_name)
self.fake_image.header.set('OBSTYPE', value='ARC')
self.fake_image.header.set('GSP_EXTR', value='100.00:101.00')
same_fake_image = save_extracted(ccd=self.fake_image,
destination=self.current_directory,
prefix='e',
target_number=0)
expected_new_name = 'e' + re.sub(
'.fits',
'_' + re.sub(':', '-', same_fake_image.header['GSP_EXTR']) + '.fits',
self.file_name)
self.assertEqual(same_fake_image, self.fake_image)
self.assertEqual(same_fake_image.header['GSP_FNAM'], expected_new_name)
self.assertTrue(os.path.isfile(self.fake_image.header['GSP_FNAM']))
def tearDown(self):
files_to_remove = [self.full_path, self.fake_image.header['GSP_FNAM']]
for _file in files_to_remove:
if os.path.isfile(_file):
os.unlink(_file)
if 'testdir' in self.full_path:
os.rmdir(os.path.dirname(self.full_path))
class FixKeywordsTest(TestCase):
def setUp(self):
self.ccd = CCDData(data=np.ones((100, 100)),
meta=fits.Header(),
unit='adu')
self.full_path = os.path.join(os.getcwd(), 'sample_file.fits')
self.ccd.write(self.full_path)
def tearDown(self):
if os.path.isfile(self.full_path):
os.unlink(self.full_path)
def test_fix_keywords(self):
# not really testing anything here
fix_keywords(path=os.getcwd(), pattern="*.fits")
class GenerateDcrFile(TestCase):
def setUp(self):
self.create = GenerateDcrParFile()
self.ccd = CCDData(data=np.ones((100, 100)),
meta=fits.Header(),
unit='adu')
self.ccd.header.set('INSTCONF', value='Red')
self.ccd.header.set('CCDSUM', value='1 1')
def test_generate_dcr_par_file(self):
serial, parallel = self.ccd.header['CCDSUM'].split()
instrument = self.ccd.header['INSTCONF']
self.assertEqual(serial, '1')
self.assertEqual(instrument, 'Red')
self.assertEqual(self.create._file_name, 'dcr.par')
self.assertIsInstance(self.create._df, pandas.DataFrame)
self.assertFalse(os.path.isfile(self.create._file_name))
self.create()
self.assertTrue(os.path.isfile(self.create._file_name))
self.assertRaises(AssertionError, self.create, 'Green')
def tearDown(self):
if os.path.isfile(self.create._file_name):
os.remove(self.create._file_name)
class GetLinesInLampTest(TestCase):
def setUp(self):
self.ccd = CCDData(data=np.ones(5000),
meta=fits.Header(),
unit='adu')
self.ccd.header.set('SLIT', value='1.0_LONG_SLIT')
self.ccd.header.set('CCDSUM', value='1 1')
self.ccd.header.set('OBJECT', value='TestSubject')
self.line_centers = np.arange(100, 4900, 200)
self.gaussian = models.Gaussian1D(amplitude=10000, stddev=3)
for line_center in self.line_centers:
self.gaussian.mean.value = line_center
self.ccd.data += self.gaussian(range(len(self.ccd.data)))
def test_get_lines_in_lamp_wrong_input(self):
expected_none = get_lines_in_lamp(ccd=list(range(100)))
self.assertIsNone(expected_none)
def test_get_lines_in_lamp_narrow_slit(self):
recovered_lines = get_lines_in_lamp(ccd=self.ccd, plots=False)
np.testing.assert_allclose(self.line_centers, recovered_lines)
def test_get_lines_in_lamp_broad_slit(self):
self.ccd.header.set('SLIT', value='5.0_LONG_SLIT')
box_kernel = Box1DKernel(width=5.0 / 0.15)
self.ccd.data = convolve(self.ccd.data, box_kernel)
recovered_lines = get_lines_in_lamp(ccd=self.ccd, plots=False)
np.testing.assert_allclose(self.line_centers, recovered_lines, atol=0.6)
class GetOverscanRegionTest(TestCase):
def setUp(self):
self.ccd = CCDData(data=np.ones((100, 100)),
meta=fits.Header(),
unit='adu')
self.ccd.header.set('CCDSUM', value='1 1')
self.ccd.header.set('TRIMSEC', value='[10:10,10:10]')
self.full_path = os.path.join(os.getcwd(), 'testfile.fits')
self.ccd.write(self.full_path)
def tearDown(self):
if os.path.isfile(self.full_path):
os.unlink(self.full_path)
def test_get_overscan_region_spectroscopy_blue(self):
self.ccd.header.set('INSTCONF', 'Blue')
self.ccd.write(self.full_path, overwrite=True)
expected_overscan = '[1:16,1:100]'
overscan_region = get_overscan_region(sample_image=self.full_path,
technique='Spectroscopy')
self.assertEqual(expected_overscan, overscan_region)
def test_get_overscan_region_spectroscopy_red(self):
self.ccd.header.set('INSTCONF', 'Red')
self.ccd.write(self.full_path, overwrite=True)
expected_overscan = '[6:49,1:100]'
overscan_region = get_overscan_region(sample_image=self.full_path,
technique='Spectroscopy')
self.assertEqual(expected_overscan, overscan_region)
def test_get_overscan_region_imaging(self):
overscan_region = get_overscan_region(sample_image=self.full_path,
technique='Imaging')
self.assertIsNone(overscan_region)
def test_get_overscan_region_other_technique(self):
overscan_region = get_overscan_region(sample_image=self.full_path,
technique='AnyOther')
self.assertIsNone(overscan_region)
class GetSpectralCharacteristicsTest(TestCase):
def setUp(self):
self.ccd = CCDData(data=np.random.random_sample(200),
meta=fits.Header(),
unit='adu')
self.pixel_size = 15 * u.micrometer
self.goodman_focal_length = 377.3 * u.mm
def test__get_spectral_characteristics(self):
self.ccd.header.set('SLIT', '1.0_LONG_SLIT')
self.ccd.header.set('GRATING', 'SYZY_400')
self.ccd.header.set('GRT_ANG', 7.5)
self.ccd.header.set('CAM_ANG', 16.1)
self.ccd.header.set('CCDSUM', '1 1')
spec_charact = get_spectral_characteristics(
ccd=self.ccd,
pixel_size=self.pixel_size,
instrument_focal_length=self.goodman_focal_length)
self.assertIsInstance(spec_charact, dict)
self.assertEqual(len(spec_charact), 7)
class InterpolationTest(TestCase):
def test_interpolate(self):
initial_array = np.sin(np.arange(0, 3 * np.pi))
initial_length = len(initial_array)
new_x_axis, new_array = interpolate(spectrum=initial_array,
interpolation_size=100)
self.assertEqual(len(new_x_axis), len(new_array))
self.assertEqual(len(new_array), initial_length * 100)
class IsFileSaturatedTest(TestCase):
def setUp(self):
self.ccd = CCDData(data=np.ones((100, 100)),
meta=fits.Header(),
unit='adu')
self.ccd.header.set('INSTCONF', value='Red')
self.ccd.header.set('GAIN', value=1.48)
self.ccd.header.set('RDNOISE', value=3.89)
self.half_full_well = 69257
def test_file_is_saturated(self):
self.ccd.data[:10, :10] = self.half_full_well + 1
self.assertTrue(is_file_saturated(ccd=self.ccd, threshold=1))
def test_file_is_not_saturated(self):
self.ccd.data[:10, :10] = self.half_full_well + 1
self.ccd.data[0, 0] = 1
self.assertFalse(is_file_saturated(ccd=self.ccd, threshold=1))
class LinearizeSpectrumTest(TestCase):
def setUp(self):
feature = models.Gaussian1D(amplitude=500, mean=3000, stddev=5)
self.data = np.ones(5000) + feature(range(5000))
self.solution_model = models.Polynomial1D(degree=3)
self.solution_model.c0.value = 3500
self.solution_model.c1.value = 1
self.solution_model.c2.value = 1e-7
self.non_linear_x_axis = self.solution_model(range(len(self.data)))
self.feature_center = self.non_linear_x_axis[3000]
def test_linearize_spectrum_nans_in_data(self):
self.data[0:10] = np.nan
self.assertRaises(SystemExit,
linearize_spectrum,
self.data,
self.solution_model)
def test_linearize_spectrum_wrong_input(self):
linear_data = linearize_spectrum(data=self.data,
wavelength_solution=None)
self.assertIsNone(linear_data)
def test_linearize_spectrum(self):
linear_x_axis, linear_data = linearize_spectrum(data=self.data,
wavelength_solution=self.solution_model)
new_gaussian = models.Gaussian1D(amplitude=100,
mean=self.feature_center,
stddev=5)
fitter = fitting.LevMarLSQFitter()
fitted_linear = fitter(new_gaussian, linear_x_axis, linear_data)
np.testing.assert_array_almost_equal(self.feature_center,
fitted_linear.mean.value,
decimal=2)
class MasterFlatTest(TestCase):
def setUp(self):
# create a master flat
self.master_flat = CCDData(data=np.ones((100, 100)),
meta=fits.Header(),
unit='adu')
self.master_flat.header.set('GRATING', value='RALC_1200-BLUE')
self.master_flat.header.set('SLIT', value='0.84" long slit')
self.master_flat.header.set('FILTER2', value='<NO FILTER>')
self.master_flat.header.set('WAVMODE', value='1200 m2')
self.master_flat_name = 'master_flat_1200m2.fits'
# expected master flat to be retrieved by get_best_flat
self.reference_flat_name = 'master_flat_1200m2_0.84_dome.fits'
# location of sample flats
self.flat_path = 'goodman_pipeline/data/test_data/master_flat'
slit = re.sub('[A-Za-z" ]',
'',
self.master_flat.header['SLIT'])
self.flat_name_base = re.sub('.fits',
'_' + slit + '*.fits',
self.master_flat_name)
# save a master flat with some random structure.
self.master_flat_name_norm = 'flat_to_normalize.fits'
# add a bias level
self.master_flat.data += 300.
# add noise
self.master_flat.data += np.random.random_sample(
self.master_flat.data.shape)
self.master_flat.write(os.path.join(self.flat_path,
self.master_flat_name_norm),
overwrite=False)
def tearDown(self):
full_path = os.path.join(self.flat_path,
self.master_flat_name_norm)
self.assertTrue(os.path.isfile(full_path))
if os.path.isfile(full_path):
os.unlink(full_path)
self.assertFalse(os.path.isfile(full_path))
# remove normalized flat
norm_flat = re.sub('flat_to_', 'norm_flat_to_', full_path)
if os.path.isfile(norm_flat):
os.unlink(norm_flat)
self.assertFalse(os.path.isfile(norm_flat))
def test_get_best_flat(self):
# print(self.flat_name_base)
master_flat, master_flat_name = get_best_flat(
flat_name=self.flat_name_base,
path=self.flat_path)
self.assertIsInstance(master_flat, CCDData)
self.assertEqual(os.path.basename(master_flat_name),
self.reference_flat_name)
def test_get_best_flat_fail(self):
# Introduce an error that will never produce a result.
wrong_flat_name = re.sub('1200m2', '1300m2', self.flat_name_base)
master_flat, master_flat_name = get_best_flat(
flat_name=wrong_flat_name,
path=self.flat_path)
self.assertIsNone(master_flat)
self.assertIsNone(master_flat_name)
def test_normalize_master_flat(self):
methods = ['mean', 'simple', 'full']
for method in methods:
self.assertNotAlmostEqual(self.master_flat.data.mean(), 1.)
normalized_flat, normalized_flat_name = normalize_master_flat(
master=self.master_flat,
name=os.path.join(self.flat_path,
self.master_flat_name_norm),
method=method)
self.assertAlmostEqual(normalized_flat.data.mean(), 1.,
delta=0.001)
self.assertEqual(normalized_flat.header['GSP_NORM'], method)
self.assertIn('norm_', normalized_flat_name)
class NameMasterFlatsTest(TestCase):
def setUp(self):
self.reduced_data = os.getcwd()
#reference
date = '2019-08-28'
self.twilight_start_evening = '2019-08-27T23:45:00.022'
self.twilight_end_morning = '2019-08-28T09:43:20.023'
self.sun_set_time = '2019-08-27T22:21:00.437'
self.sun_rise_time = '2019-08-28T11:07:11.851'
self.header = fits.Header()
self.header.set('GRATING', value='400_SYZY')
self.header.set('GRT_TARG', value=7.5)
self.header.set('CAM_TARG', value=16.1)
self.header.set('FILTER', value='<NO FILTER>')
self.header.set('FILTER2', value='<NO FILTER>')
self.header.set('SLIT', value='1.0_LONG_SLIT')
def test_name_master_flats_spectroscopy(self):
expected_name = 'master_flat_TestSubject_400m2_GG455_1.0_dome.fits'
self.header.set('FILTER2', value='GG455')
self.header.set('DATE-OBS', value='2019-08-27T20:21:00.437')
flat_name = name_master_flats(
header=self.header,
technique='Spectroscopy',
reduced_data=self.reduced_data,
sun_set=self.sun_set_time,
sun_rise=self.sun_rise_time,
evening_twilight=self.twilight_start_evening,
morning_twilight=self.twilight_end_morning,
target_name='TestSubject',
get=False)
self.assertEqual(expected_name, os.path.basename(flat_name))
def test_name_master_flats_spectroscopy_no_grating_no_filter2(self):
expected_name = 'master_flat_TestSubject_no_grating_1.0_sky.fits'
self.header.set('GRATING', value='<NO GRATING>')
self.header.set('DATE-OBS', value='2019-08-27T23:40:00.022')
flat_name = name_master_flats(
header=self.header,
technique='Spectroscopy',
reduced_data=self.reduced_data,
sun_set=self.sun_set_time,
sun_rise=self.sun_rise_time,
evening_twilight=self.twilight_start_evening,
morning_twilight=self.twilight_end_morning,
target_name='TestSubject',
get=False)
self.assertEqual(expected_name, os.path.basename(flat_name))
def test_name_master_flats_imaging_no_filter(self):
expected_name = 'master_flat_NO_FILTER_night.fits'
self.header.set('DATE-OBS', value='2019-08-27T23:50:00.022')
flat_name = name_master_flats(
header=self.header,
technique='Imaging',
reduced_data=self.reduced_data,
sun_set=self.sun_set_time,
sun_rise=self.sun_rise_time,
evening_twilight=self.twilight_start_evening,
morning_twilight=self.twilight_end_morning,
target_name='TestSubject',
get=False)
self.assertEqual(expected_name, os.path.basename(flat_name))
def test_name_master_flats_imaging(self):
expected_name = 'master_flat_u_BESSEL_night.fits'
self.header.set('DATE-OBS', value='2019-08-27T23:50:00.022')
self.header.set('FILTER', value='u-BESSEL')
flat_name = name_master_flats(
header=self.header,
technique='Imaging',
reduced_data=self.reduced_data,
sun_set=self.sun_set_time,
sun_rise=self.sun_rise_time,
evening_twilight=self.twilight_start_evening,
morning_twilight=self.twilight_end_morning,
target_name='TestSubject',
get=False)
self.assertEqual(expected_name, os.path.basename(flat_name))
def test_name_master_flats_get_true(self):
expected_name = 'master_flat_TestSubject_400m2_GG455_1.0*.fits'
self.header.set('FILTER2', value='GG455')
self.header.set('DATE-OBS', value='2019-08-27T20:21:00.437')
flat_name = name_master_flats(
header=self.header,
technique='Spectroscopy',
reduced_data=self.reduced_data,
sun_set=self.sun_set_time,
sun_rise=self.sun_rise_time,
evening_twilight=self.twilight_start_evening,
morning_twilight=self.twilight_end_morning,
target_name='TestSubject',
get=True)
self.assertEqual(expected_name, os.path.basename(flat_name))
class NightDataContainerTests(TestCase):
def setUp(self):
self.container = NightDataContainer(path=os.getcwd(),
instrument='Red',
technique='Spectroscopy')
columns = ['file', 'obstype']
sample_data_1 = [['file1.fits', 'OBJECT']]
sample_data_2 = [['file1.fits', 'OBJECT'],
['file2.fits', 'OBJECT']]
self.sample_df_1 = pandas.DataFrame(sample_data_1, columns=columns)
self.sample_df_2 = pandas.DataFrame(sample_data_2, columns=columns)
def test___repr___method_empty(self):
result = self.container.__repr__()
self.assertEqual(result, 'Empty Data Container')
def test___repr___method_not_empty(self):
self.container.is_empty = False
self.container.gain = 1
self.container.rdnoise = 1
self.container.roi = 'roi'
result = self.container.__repr__()
self.assertIn('Full Path: {:s}'.format(os.getcwd()), result)
self.assertIn('Instrument: Red', result)
self.assertIn('Technique: Spectroscopy', result)
self.assertIn('Is Empty: False', result)
_expected_content = ['Data Grouping Information',
'BIAS Group:',
'Group is Empty',
'Day FLATs Group:',
'Dome FLATs Group:',
'Sky FLATs Group:',
'COMP Group:',
'OBJECT Group',
'OBJECT + COMP Group:']
for _line in _expected_content:
self.assertIn(_line, result)
def test___repr___method_imaging_not_empty(self):
self.container.technique = 'Imaging'
self.container.add_bias(bias_group=self.sample_df_2)
self.container.add_day_flats(day_flats=self.sample_df_1)
self.container.add_data_group(data_group=self.sample_df_2)
#
self.container.dome_flats = [self.sample_df_1]
self.container.sky_flats = [self.sample_df_2]
self.container.gain = 1
self.container.rdnoise = 1
self.container.roi = 'roi'
result = self.container.__repr__()
self.assertNotIn('Group is Empty', result)
@skip
def test__get_group_repr(self):
pass
def test_add_bias_imaging_insufficient_bias(self):
self.container.technique = 'Imaging'
self.container.add_bias(bias_group=self.sample_df_1)
self.assertTrue(self.container.bias is None)
self.assertTrue(self.container.is_empty)
def test_add_bias_spectroscopy_insufficient_bias(self):
self.container.add_bias(bias_group=self.sample_df_1)
self.assertTrue(self.container.bias is None)
self.assertTrue(self.container.is_empty)
def test_add_bias(self):
self.container.add_bias(bias_group=self.sample_df_2)
self.container.add_bias(bias_group=self.sample_df_2)
self.assertFalse(self.container.bias is None)
self.assertFalse(self.container.is_empty)
def test_add_day_flats(self):
self.container.add_day_flats(day_flats=self.sample_df_1)
self.assertIsInstance(self.container.day_flats[0], pandas.DataFrame)
self.container.add_day_flats(day_flats=self.sample_df_2)
self.assertFalse(self.container.day_flats is None)
self.assertFalse(self.container.is_empty)
def test_add_data_group(self):
self.container.add_data_group(data_group=self.sample_df_1)
self.assertIsInstance(self.container.data_groups[0], pandas.DataFrame)
self.container.add_data_group(data_group=self.sample_df_2)
self.assertFalse(self.container.data_groups is None)
self.assertFalse(self.container.is_empty)
def test_add_comp_group(self):
self.container.add_comp_group(comp_group=self.sample_df_1)
self.assertIsInstance(self.container.comp_groups[0], pandas.DataFrame)
self.container.add_comp_group(comp_group=self.sample_df_2)
self.assertFalse(self.container.comp_groups is None)
self.assertFalse(self.container.is_empty)
def test_add_object_group(self):
self.container.add_object_group(object_group=self.sample_df_1)
self.assertIsInstance(self.container.object_groups[0], pandas.DataFrame)
self.container.add_object_group(object_group=self.sample_df_2)
self.assertFalse(self.container.object_groups is None)
self.assertFalse(self.container.is_empty)
def test_add_spec_group(self):
self.container.add_spec_group(spec_group=self.sample_df_1)
self.assertIsInstance(self.container.spec_groups[0], pandas.DataFrame)
self.container.add_spec_group(spec_group=self.sample_df_2)
self.assertFalse(self.container.spec_groups is None)
self.assertFalse(self.container.is_empty)
def test_set_sun_times(self):
_sun_set = '2019-01-01T18:00:00'
_sun_rise = '2019-01-01T06:00:00'
self.container.set_sun_times(sun_set=_sun_set, sun_rise=_sun_rise)
self.assertEqual(self.container.sun_set_time, _sun_set)
self.assertEqual(self.container.sun_rise_time, _sun_rise)
def test_set_twilight_times(self):
_evening = '2019-01-01T18:00:00'
_morning = '2019-01-01T06:00:00'
self.container.set_twilight_times(evening=_evening, morning=_morning)
self.assertEqual(self.container.evening_twilight, _evening)
self.assertEqual(self.container.morning_twilight, _morning)
def test_set_readout(self):
_gain = 1.48
_rdnoise = 3.89
_roi = 'Spectroscopic 2x2'
self.container.set_readout(gain=_gain, rdnoise=_rdnoise, roi=_roi)
self.assertEqual(self.container.gain, _gain)
self.assertEqual(self.container.rdnoise, _rdnoise)
self.assertEqual(self.container.roi, _roi)
class RaDecConversion(TestCase):
def setUp(self):
self.ra = '19:09:55.026'
self.dec = '-68:18:01.901'
self.reference_ra = 287.479275
self.reference_dec = -68.3005281
def test_ra_dec_to_deg_negative_dec(self):
radeg, decdeg = ra_dec_to_deg(right_ascension=self.ra,
declination=self.dec)
self.assertAlmostEqual(radeg, self.reference_ra)
self.assertAlmostEqual(decdeg, self.reference_dec)
def test_ra_dec_to_deg_positive_dec(self):
self.dec = '68:18:01.901'
radeg, decdeg = ra_dec_to_deg(right_ascension=self.ra,
declination=self.dec)
self.assertAlmostEqual(radeg, self.reference_ra)
self.assertAlmostEqual(decdeg, -1 * self.reference_dec)
class RecordTraceInformationTest(TestCase):
def setUp(self):
self.ccd = CCDData(data=np.ones((800, 2000)),
meta=fits.Header(),
unit='adu')
self.all_keywords = ['GSP_TMOD',
'GSP_TORD',
'GSP_TC00',
'GSP_TC01',
'GSP_TC02',
'GSP_TERR']
self.trace_info = collections.OrderedDict()
self.trace_info['GSP_TMOD'] = ['Polinomial1D',
'Model name used to fit trace']
self.trace_info['GSP_TORD'] = [2, 'Degree of the model used to fit '
'target trace']
self.trace_info['GSP_TC00'] = [500, 'Parameter c0']
self.trace_info['GSP_TC01'] = [1, 'Parameter c1']
self.trace_info['GSP_TC02'] = [2, 'Parameter c2']
self.trace_info['GSP_TERR'] = [0.5, 'RMS error of target trace']
def test_record_trace_information(self):
ccd = record_trace_information(ccd=self.ccd, trace_info=self.trace_info)
new_keys = [key for key in ccd.header.keys()]
self.assertTrue(all([key in new_keys for key in self.all_keywords]))
self.assertEqual(ccd.header['GSP_TMOD'], 'Polinomial1D')
self.assertEqual(ccd.header['GSP_TORD'], 2)
class ReferenceDataTest(TestCase):
def setUp(self):
self.rd = ReferenceData(
reference_dir=os.path.join(os.getcwd(),
'goodman_pipeline/data/ref_comp'))
self.ccd = CCDData(data=np.ones((800, 2000)),
meta=fits.Header(),
unit='adu')
self.ccd.header.set('GRATING', value='400_SYZY')
self.ccd.header.set('GRT_TARG', value=7.5)
self.ccd.header.set('CAM_TARG', value=16.1)
self.columns = ['object',
'grating',
'grt_targ',
'cam_targ',
'lamp_hga',
'lamp_ne',
'lamp_ar',
'lamp_fe',
'lamp_cu',]
self.data_exist = [
['HgArNe',
'400_SYZY',
7.5,
16.1,
'TRUE',
'TRUE',
'FALSE',
'FALSE',
'FALSE'],
['HgAr',
'400_SYZY',
7.5,
16.1,
'TRUE',
'FALSE',
'FALSE',
'FALSE',
'FALSE']]
self.data_does_not_exist = [
['HgArNe',
'SYZY_800',
7.5,
16.1,
'TRUE',
'TRUE',
'FALSE',
'FALSE',
'FALSE'],
['HgAr',
'SYZY_800',
7.5,
16.1,
'TRUE',
'FALSE',
'FALSE',
'FALSE',
'FALSE']]
def test_get_reference_lamp_exist_with_lamps_status_key(self):
self.ccd.header.set('LAMP_HGA', value='TRUE')
self.ccd.header.set('LAMP_NE', value='TRUE')
self.ccd.header.set('LAMP_AR', value='FALSE')
self.ccd.header.set('LAMP_FE', value='FALSE')
self.ccd.header.set('LAMP_CU', value='FALSE')
self.ccd.header.set('LAMP_QUA', value='FALSE')
self.ccd.header.set('LAMP_QPE', value=0)
self.ccd.header.set('LAMP_BUL', value='FALSE')
self.ccd.header.set('LAMP_DOM', value='FALSE')
self.ccd.header.set('LAMP_DPE', value=0)
self.ccd.header.set('WAVMODE', value='400_M2')
ref_lamp = self.rd.get_reference_lamp(header=self.ccd.header)
self.assertIsInstance(ref_lamp, CCDData)
self.assertEqual(ref_lamp.header['LAMP_HGA'], self.ccd.header['LAMP_HGA'])
self.assertEqual(ref_lamp.header['LAMP_NE'], self.ccd.header['LAMP_NE'])
self.assertEqual(ref_lamp.header['WAVMODE'], self.ccd.header['WAVMODE'])
def test_get_reference_lamp_no_match_status_keys(self):
self.ccd.header.set('LAMP_HGA', value='TRUE')
self.ccd.header.set('LAMP_NE', value='TRUE')
self.ccd.header.set('LAMP_AR', value='TRUE')
self.ccd.header.set('LAMP_FE', value='TRUE')
self.ccd.header.set('LAMP_CU', value='TRUE')
self.ccd.header.set('LAMP_QUA', value='TRUE')
self.ccd.header.set('LAMP_QPE', value=0)
self.ccd.header.set('LAMP_BUL', value='FALSE')
self.ccd.header.set('LAMP_DOM', value='FALSE')
self.ccd.header.set('LAMP_DPE', value=0)
self.ccd.header.set('WAVMODE', value='400_M2')
self.assertRaises(NoMatchFound,
self.rd.get_reference_lamp,
self.ccd.header)
def test_get_reference_lamp_exist_with_object_key(self):
self.ccd.header.set('OBJECT', value='HgArNe')
self.ccd.header.set('WAVMODE', value='400_M2')
self.ccd.header.set('GSP_P', value='1')
self.ccd.header.set('GSP_P', value='2')
self.ccd.header.set('GSP_P', value='3')
self.ccd.header.set('GSP_P', value='4')
self.ccd.header.set('GSP_A', value='0')
self.ccd.header.set('GSP_A', value='100')
self.ccd.header.set('GSP_A', value='0')
self.ccd.header.set('GSP_A', value='200')
ref_lamp = self.rd.get_reference_lamp(header=self.ccd.header)
self.assertIsInstance(ref_lamp, CCDData)
self.assertEqual(ref_lamp.header['OBJECT'], self.ccd.header['OBJECT'])
self.assertEqual(ref_lamp.header['WAVMODE'], self.ccd.header['WAVMODE'])
def test_get_reference_lamp_does_not_exist(self):
self.ccd.header.set('OBJECT', value='HgArCu')
self.ccd.header.set('WAVMODE', value='400_M5')
self.assertRaises(NoMatchFound,
self.rd.get_reference_lamp,
self.ccd.header)
def test_lamp_exist(self):
self.ccd.header.set('LAMP_HGA', value='TRUE')
self.ccd.header.set('LAMP_NE', value='TRUE')
self.ccd.header.set('LAMP_AR', value='FALSE')
self.ccd.header.set('LAMP_FE', value='FALSE')
self.ccd.header.set('LAMP_CU', value='FALSE')
self.ccd.header.set('LAMP_QUA', value='FALSE')
self.ccd.header.set('LAMP_QPE', value=0)
self.ccd.header.set('LAMP_BUL', value='FALSE')
self.ccd.header.set('LAMP_DOM', value='FALSE')
self.ccd.header.set('LAMP_DPE', value=0)
self.ccd.header.set('WAVMODE', value='400_M2')
self.assertTrue(self.rd.lamp_exists(header=self.ccd.header))
# HgArNeCu is impossible
self.ccd.header.set('LAMP_CU', value='TRUE')
self.assertFalse(self.rd.lamp_exists(header=self.ccd.header))
def test_check_comp_group__lamp_exists(self):
comp_group = pandas.DataFrame(self.data_exist,
columns=self.columns)
new_group = self.rd.check_comp_group(comp_group=comp_group)
self.assertIsInstance(new_group, pandas.DataFrame)
self.assertFalse(comp_group.equals(new_group))
self.assertEqual(len(new_group), 1)
def test_check_comp_group__lamp_does_not_exist(self):
comp_group = pandas.DataFrame(self.data_does_not_exist,
columns=self.columns)
new_group = self.rd.check_comp_group(comp_group=comp_group)
self.assertIsInstance(new_group, pandas.DataFrame)
self.assertTrue(comp_group.equals(new_group))
def test__order_validation(self):
should_be_true = self.rd._order_validation(range(10))
self.assertTrue(should_be_true)
should_be_false = self.rd._order_validation(range(10)[::-1])
self.assertFalse(should_be_false)
def test__load_nist_list(self):
self.assertIsInstance(self.rd.nist, dict)
self.assertEqual(0, len(self.rd.nist))
self.rd._load_nist_list()
self.assertIsInstance(self.rd.nist, dict)
self.assertGreater(len(self.rd.nist), 0)
class SaturationValuesTest(TestCase):
def setUp(self):
self.ccd = CCDData(data=np.ones((100, 100)),
meta=fits.Header(),
unit='adu')
self.ccd.header.set('INSTCONF', value='Red')
self.ccd.header.set('GAIN', value=1.48)
self.ccd.header.set('RDNOISE', value=3.89)
self.half_full_well = 69257
self.saturation_values = SaturationValues(ccd=self.ccd)
def test_half_full_well_value(self):
self.assertEqual(self.saturation_values.saturation_value,
self.half_full_well)
def test_empty_result(self):
self.ccd.header['GAIN'] = 2.3
result = self.saturation_values.get_saturation_value(ccd=self.ccd)
self.assertIsNone(result)
self.assertIsNone(self.saturation_values.saturation_value)
class SearchCompGroupTest(TestCase):
def setUp(self):
columns = ['object',
'grating',
'cam_targ',
'grt_targ',
'filter',
'filter2',
'lamp_hga',
'lamp_ne',
'lamp_ar',
'lamp_fe',
'lamp_cu']
self.object_group = pandas.DataFrame(
data=[['NGC2070',
'SYZY_400',
16.1,
7.5,
'<NO FILTER>',
'GG455',
'TRUE',
'FALSE',
'FALSE',
'FALSE',
'FALSE'
]],
columns=columns)
self.object_group_no_match = pandas.DataFrame(
data=[['NGC2070',
'SYZY_600',
16.1,
7.5,
'<NO FILTER>',
'GG455',
'TRUE',
'FALSE',
'FALSE',
'FALSE',
'FALSE']],
columns=columns)
self.comp_groups = [
pandas.DataFrame(
data=[['HgArNe',
'SYZY_400',
16.1,
7.5,
'<NO FILTER>',
'GG455',
'TRUE',
'FALSE',
'FALSE',
'FALSE',
'FALSE']], columns=columns),
pandas.DataFrame(
data=[['CuArNe',
'SYZY_400',
11.6,
5.8,
'<NO FILTER>',
'GG455',
'TRUE',
'FALSE',
'FALSE',
'FALSE',
'FALSE']], columns=columns)]
self.reference_data = ReferenceData(
reference_dir=os.path.join(os.getcwd(),
'goodman_pipeline/data/ref_comp'))
def test_search_comp_group(self):
result = search_comp_group(
object_group=self.object_group,
comp_groups=self.comp_groups,
reference_data=self.reference_data)
self.assertIsInstance(result, pandas.DataFrame)
self.assertFalse(result.empty)
def test_search_comp_group_no_match(self):
with self.assertRaises(NoMatchFound):
search_comp_group(
object_group=self.object_group_no_match,
comp_groups=self.comp_groups,
reference_data=self.reference_data)
class SlitTrimTest(TestCase):
# TODO (simon): discuss with Bruno
def setUp(self):
# Create fake image
self.fake_image = CCDData(data=np.ones((100, 100)),
meta=fits.Header(),
unit='adu')
# define
self.slit_low_limit = 5
self.slit_high_limit = 95
self.reference_slit_trim = '[1:100,{:d}:{:d}]'.format(
self.slit_low_limit + 10 + 1,
self.slit_high_limit - 10)
# make a flat-like structure
self.fake_image.data[self.slit_low_limit:self.slit_high_limit, :] = 100
def test_get_slit_trim_section__slit_within_data(self):
slit_trim = get_slit_trim_section(master_flat=self.fake_image)
# print(fake_image.data[:,5])
# print(slit_trim)
self.assertEqual(slit_trim, self.reference_slit_trim)
def test_get_slit_trim_section__slit_full_data(self):
self.fake_image.data[:, :] = 100
slit_trim = get_slit_trim_section(master_flat=self.fake_image)
# print(fake_image.data[:,5])
self.assertEqual(slit_trim, '[1:100,1:100]')
def test_image_trim_slit(self):
# # define
# slit_low_limit = 5
# slit_high_limit = 95
#
# slit_trim = '[1:100,{:d}:{:d}]'.format(slit_low_limit + 10 + 1,
# slit_high_limit - 10)
self.fake_image = image_trim(ccd=self.fake_image,
trim_section=self.reference_slit_trim,
trim_type='slit')
self.assertIsInstance(self.fake_image, CCDData)
reference_size = (self.slit_high_limit - 10) - \
(self.slit_low_limit + 10)
self.assertEqual(self.fake_image.data.shape, (reference_size, 100))
self.assertEqual(self.fake_image.header['GSP_SLIT'],
self.reference_slit_trim)
class SpectroscopicModeTest(TestCase):
def setUp(self):
self.sm = SpectroscopicMode()
self.ccd = CCDData(data=np.ones((800, 2000)),
meta=fits.Header(),
unit='adu')
self.ccd.header.set('GRATING', value='SYZY_400')
self.ccd.header.set('CAM_TARG', value='16.1')
self.ccd.header.set('GRT_TARG', value='7.5')
self.ccd.header.set('FILTER2', value='GG455')
def test__call__(self):
self.assertRaises(SyntaxError, self.sm)
mode_m2_header = self.sm(header=self.ccd.header)
self.assertEqual(mode_m2_header, 'm2')
mode_m2_keywords = self.sm(grating=self.ccd.header['GRATING'],
camera_targ=self.ccd.header['CAM_TARG'],
grating_targ=self.ccd.header['GRT_TARG'],
blocking_filter=self.ccd.header['FILTER2'])
self.assertEqual(mode_m2_keywords, 'm2')
def test_get_mode(self):
mode_m2 = self.sm.get_mode(grating='400',
camera_targ='16.1',
grating_targ='7.5',
blocking_filter='GG455')
self.assertEqual(mode_m2, 'm2')
mode_custom_400 = self.sm.get_mode(grating='400',
camera_targ='16.1',
grating_targ='6.6',
blocking_filter='GG455')
self.assertEqual(mode_custom_400, 'Custom_7000nm')
mode_custom_2100 = self.sm.get_mode(grating='2100',
camera_targ='16.1',
grating_targ='7.5',
blocking_filter='GG455')
self.assertEqual(mode_custom_2100, 'Custom_1334nm')
def test_get_cam_grt_targ_angle(self):
cam_targ, grt_targ = self.sm.get_cam_grt_targ_angle(1800, 'm10')
self.assertIsNone(cam_targ)
self.assertIsNone(grt_targ)
cam_targ, grt_targ = self.sm.get_cam_grt_targ_angle(930, 'm5')
self.assertEqual(cam_targ, '39.4')
self.assertEqual(grt_targ, '19.7')
cam_targ, grt_targ = self.sm.get_cam_grt_targ_angle(930, 'm7')
self.assertIsNone(cam_targ)
self.assertIsNone(grt_targ)
class TargetsTest(TestCase):
def setUp(self):
self.ccd = CCDData(data=np.ones((800, 2000)),
meta=fits.Header(),
unit='adu')
self.ccd.header.set('GSP_FNAM',
value='fake-name.fits',
comment='Fake file name')
self.profile_1 = models.Gaussian1D(amplitude=200,
mean=100,
stddev=10).rename('Profile_1')
self.profile_2 = models.Gaussian1D(amplitude=200,
mean=600,
stddev=10).rename('Profile_2')
self.profile_3 = models.Moffat1D(amplitude=200,
x_0=600,
gamma=3).rename('Profile_3')
profile_sum = self.profile_1 + self.profile_2
self.ccd2 = self.ccd.copy()
self.no_targets_ccd = self.ccd.copy()
for i in range(self.ccd.data.shape[1]):
self.ccd.data[:, i] *= profile_sum(range(self.ccd.data.shape[0]))
self.ccd2.data[:, i] *= self.profile_3(
range(self.ccd2.data.shape[0]))
        # this adds noise to test the removal of masked values
# self.ccd.data[
# random.randrange(self.ccd.data.shape[0]),
# random.randrange(self.ccd.data.shape[1])] *= 300
# self.ccd2.data[
# random.randrange(self.ccd2.data.shape[0]),
# random.randrange(self.ccd2.data.shape[1])] *= 300
def tearDown(self):
del self.ccd
del self.profile_1
del self.profile_2
del self.profile_3
def test_identify_targets_moffat(self):
self.ccd.header.set('OBSTYPE',
value='OBJECT',
comment='Fake values')
self.ccd.header.set('SLIT',
value='1.03" long slit',
comment='Fake slit')
self.ccd.header.set('CCDSUM',
value='1 1',
comment='Fake values')
targets = identify_targets(ccd=self.ccd,
fit_model='moffat',
background_threshold=3,
nfind=2,
plots=False)
self.assertEqual(len(targets), 2)
for target in targets:
self.assertIsInstance(target, Model)
def test_identify_targets_gaussian(self):
self.ccd.header.set('OBSTYPE',
value='OBJECT',
comment='Fake values')
self.ccd.header.set('SLIT',
value='1.03" long slit',
comment='Fake slit')
self.ccd.header.set('CCDSUM',
value='1 1',
comment='Fake values')
targets = identify_targets(ccd=self.ccd,
fit_model='gaussian',
background_threshold=3,
nfind=2,
plots=False)
self.assertEqual(len(targets), 2)
for target in targets:
self.assertIsInstance(target, Model)
def test_identify_targets_empty_output(self):
self.no_targets_ccd.header.set('OBSTYPE',
value='OBJECT',
comment='Fake values')
self.no_targets_ccd.header.set('SLIT',
value='1.03" long slit',
comment='Fake slit')
self.no_targets_ccd.header.set('CCDSUM',
value='1 1',
comment='Fake values')
targets = identify_targets(ccd=self.no_targets_ccd,
fit_model='gaussian',
background_threshold=3,
nfind=2,
plots=False)
self.assertEqual(len(targets), 0)
def test_trace_gaussian(self):
trace_model = models.Polynomial1D(degree=2)
fitter = fitting.LevMarLSQFitter()
test_trace, trace_rms = trace(ccd=self.ccd,
model=self.profile_1,
trace_model=trace_model,
model_fitter=fitter,
sampling_step=5)
self.assertEqual(test_trace.c0.value, self.profile_1.mean.value)
self.assertAlmostEqual(test_trace.c1.value, 0.)
self.assertAlmostEqual(test_trace.c2.value, 0.)
def test_trace_moffat(self):
trace_model = models.Polynomial1D(degree=2)
fitter = fitting.LevMarLSQFitter()
test_trace, trace_rms = trace(ccd=self.ccd2,
model=self.profile_3,
trace_model=trace_model,
model_fitter=fitter,
sampling_step=5)
self.assertEqual(test_trace.c0.value, self.profile_3.x_0.value)
self.assertAlmostEqual(test_trace.c1.value, 0.)
self.assertAlmostEqual(test_trace.c2.value, 0.)
def test_trace_not_implemented(self):
trace_model = models.Polynomial1D(degree=2)
fitter = fitting.LevMarLSQFitter()
self.assertRaises(NotImplementedError,
trace,
self.ccd2,
models.BlackBody1D(),
trace_model,
fitter,
5)
def test_trace_targets(self):
targets = [self.profile_1, self.profile_2]
all_traces = trace_targets(ccd=self.ccd,
target_list=targets,
sampling_step=5,
pol_deg=2,
nfwhm=2,
plots=False)
for new_trace, profile, trace_info in all_traces:
self.assertEqual(new_trace.c0.value, profile.mean.value)
self.assertAlmostEqual(new_trace.c1.value, 0)
self.assertAlmostEqual(new_trace.c2.value, 0)
class TimeConversionTest(TestCase):
def setUp(self):
self.test_time_str = '2018-01-17T12:05:44.250'
self.test_time_sec = 1516190744.0
def test_convert_time(self):
self.assertEqual(convert_time(self.test_time_str), self.test_time_sec)
def test_get_twilight_time(self):
expected_evening_twilight = '2018-01-17T01:21:26.113'
expected_morning_twilight = '2018-01-17T08:24:38.919'
expected_sun_set_time = '2018-01-17T23:43:46.782'
expected_sun_rise_time = '2018-01-17T10:02:04.508'
evening_twilight, morning_twilight, sun_set, sun_rise\
= get_twilight_time([self.test_time_str])
self.assertEqual(evening_twilight, expected_evening_twilight)
self.assertEqual(morning_twilight, expected_morning_twilight)
self.assertEqual(sun_set, expected_sun_set_time)
self.assertEqual(sun_rise, expected_sun_rise_time)
class ValidateCcdRegionTest(TestCase):
def test_validate_ccd_region_valid(self):
self.assertTrue(validate_ccd_region, '[1:1,10:10]')
def test_validate_ccd_region_invalid(self):
self.assertRaises(SyntaxError, validate_ccd_region, "10:10:10]")
|
soar-telescope/goodman
|
goodman_pipeline/core/tests/test_core.py
|
Python
|
bsd-3-clause
| 93,118
|
[
"Gaussian"
] |
8c1ff876834d2de1893b0341a249b83ae408c72efb93c5d7b4b45d2a9ace98ce
|
from ase.db.core import connect
class IdCollisionError(Exception):
pass
|
grhawk/ASE
|
tools/ase/db/__init__.py
|
Python
|
gpl-2.0
| 78
|
[
"ASE"
] |
29b77e4554560a2360a3621dcd23c966f5f5b31f306386708b204347e3546558
|
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
# The basic notion of a tree has a parent, a payload, and a list of children.
# It is the most abstract interface for all the trees used by ANTLR.
#/
from antlr4.Token import Token
INVALID_INTERVAL = (-1, -2)
class Tree(object):
def __str__(self):
return unicode(self)
class SyntaxTree(Tree):
pass
class ParseTree(SyntaxTree):
pass
class RuleNode(ParseTree):
pass
class TerminalNode(ParseTree):
pass
class ErrorNode(TerminalNode):
pass
class ParseTreeVisitor(object):
def visit(self, tree):
return tree.accept(self)
def visitChildren(self, node):
result = self.defaultResult()
n = node.getChildCount()
for i in range(n):
if not self.shouldVisitNextChild(node, result):
return
c = node.getChild(i)
childResult = c.accept(self)
result = self.aggregateResult(result, childResult)
return result
def visitTerminal(self, node):
return self.defaultResult()
def visitErrorNode(self, node):
return self.defaultResult()
def defaultResult(self):
return None
def aggregateResult(self, aggregate, nextResult):
return nextResult
def shouldVisitNextChild(self, node, currentResult):
return True
class ParseTreeListener(object):
def visitTerminal(self, node):
pass
def visitErrorNode(self, node):
pass
def enterEveryRule(self, ctx):
pass
def exitEveryRule(self, ctx):
pass
class TerminalNodeImpl(TerminalNode):
def __init__(self, symbol):
self.parentCtx = None
self.symbol = symbol
def getChild(self, i):
return None
def getSymbol(self):
return self.symbol
def getParent(self):
return self.parentCtx
def getPayload(self):
return self.symbol
def getSourceInterval(self):
if self.symbol is None:
return INVALID_INTERVAL
tokenIndex = self.symbol.tokenIndex
return (tokenIndex, tokenIndex)
def getChildCount(self):
return 0
def accept(self, visitor):
return visitor.visitTerminal(self)
def getText(self):
return self.symbol.text
def __unicode__(self):
if self.symbol.type == Token.EOF:
return "<EOF>"
else:
return self.symbol.text
# Represents a token that was consumed during resynchronization
# rather than during a valid match operation. For example,
# we will create this kind of a node during single token insertion
# and deletion as well as during "consume until error recovery set"
# upon no viable alternative exceptions.
class ErrorNodeImpl(TerminalNodeImpl,ErrorNode):
def __init__(self, token):
super(ErrorNodeImpl, self).__init__(token)
def accept(self, visitor):
return visitor.visitErrorNode(self)
class ParseTreeWalker(object):
DEFAULT = None
def walk(self, listener, t):
if isinstance(t, ErrorNode):
listener.visitErrorNode(t)
return
elif isinstance(t, TerminalNode):
listener.visitTerminal(t)
return
self.enterRule(listener, t)
for child in t.getChildren():
self.walk(listener, child)
self.exitRule(listener, t)
#
    # The discovery of a rule node involves sending two events: the generic
    # {@link ParseTreeListener#enterEveryRule} and a
    # {@link RuleContext}-specific event. First we trigger the generic and then
    # the rule specific. We do them in reverse order upon finishing the node.
#
def enterRule(self, listener, r):
ctx = r.getRuleContext()
listener.enterEveryRule(ctx)
ctx.enterRule(listener)
def exitRule(self, listener, r):
ctx = r.getRuleContext()
ctx.exitRule(listener)
listener.exitEveryRule(ctx)
ParseTreeWalker.DEFAULT = ParseTreeWalker()
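# Illustrative sketch (editor's addition, not part of the ANTLR runtime): a
# minimal listener that relies on the enter-before-exit ordering described in
# the comment above enterRule/exitRule. `tree` is assumed to come from a
# generated parser elsewhere.
#
#   class DepthListener(ParseTreeListener):
#       def __init__(self):
#           self.depth = 0
#           self.max_depth = 0
#       def enterEveryRule(self, ctx):
#           self.depth += 1
#           self.max_depth = max(self.max_depth, self.depth)
#       def exitEveryRule(self, ctx):
#           self.depth -= 1
#
#   listener = DepthListener()
#   ParseTreeWalker.DEFAULT.walk(listener, tree)  # tree: hypothetical parse tree
#   print listener.max_depth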
|
cikelengfeng/HTTPIDL
|
Sources/Compiler/antlr4/tree/Tree.py
|
Python
|
mit
| 4,145
|
[
"VisIt"
] |
9f372cbd5e82e0a7dbe9c81577fd7c26dd259a8864a8380edc2f1cb62f5ada13
|
'''
Created on Jan 5, 2011
@author: Dan Blankenberg
Code from the Galaxy project (http://galaxy.psu.edu)
Contains methods to transform sequence strings
'''
import string
from math import log10
from string import maketrans
# Quality score formats
SANGER_FORMAT = "sanger"
SOLEXA_FORMAT = "solexa"
ILLUMINA_FORMAT = "illumina"
FASTQ_QUAL_FORMATS = [SANGER_FORMAT, SOLEXA_FORMAT, ILLUMINA_FORMAT]
#Translation table for reverse Complement, with ambiguity codes
DNA_COMPLEMENT = string.maketrans( "ACGTRYKMBDHVacgtrykmbdhv", "TGCAYRMKVHDBtgcayrmkvhdb" )
RNA_COMPLEMENT = string.maketrans( "ACGURYKMBDHVacgurykmbdhv", "UGCAYRMKVHDBugcayrmkvhdb" )
#Translation table for DNA <--> RNA
DNA_TO_RNA = string.maketrans( "Tt", "Uu" )
RNA_TO_DNA = string.maketrans( "Uu", "Tt" )
def DNA_complement( sequence ):
'''complement DNA sequence string'''
return sequence.translate( DNA_COMPLEMENT )
def DNA_reverse_complement( sequence ):
'''returns the reverse complement of the sequence'''
return DNA_complement(sequence[::-1])
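# Illustrative usage (editor's sketch, not part of the original chimerascan
# module): the translation tables above cover IUPAC ambiguity codes as well as
# plain ACGT, so for example:
#   >>> DNA_complement("ATGCR")
#   'TACGY'
#   >>> DNA_reverse_complement("ATGCR")
#   'YGCAT'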
def to_DNA( sequence ):
    '''convert an RNA sequence string to DNA (U/u -> T/t)'''
    return sequence.translate( RNA_TO_DNA )
#complement RNA sequence string
def RNA_complement( sequence ):
return sequence.translate( RNA_COMPLEMENT )
def RNA_reverse_complement( sequence ):
return RNA_complement( sequence[::-1] )
def to_RNA( sequence ):
    '''convert a DNA sequence string to RNA (T/t -> U/u)'''
    return sequence.translate( DNA_TO_RNA )
def get_solexa_qual_conversion_table():
"""
return a translation table that can be used by str.translate() for
converting solexa to sanger quality scores
"""
offset = 64
conv_table = ['!'] * 256
conv_table[offset:] = "I" * (256-offset)
for solq in xrange(-5, 40):
phredq = 10*log10(1 + 10**(solq/10.0))
phredchr = chr(int(round(33 + phredq)))
conv_table[offset + solq] = phredchr
conv_string = ''.join(conv_table)
return maketrans(''.join(map(chr, range(256))), conv_string)
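# Illustrative example (editor's sketch, not part of the original module): the
# lowest Solexa score, -5, is stored as chr(64 - 5) == ';' and maps to Phred
# 10*log10(1 + 10**-0.5) ~= 1.19, i.e. chr(33 + 1) == '"', while characters at
# or above the top of the table collapse to 'I':
#   >>> ";h".translate(get_solexa_qual_conversion_table())
#   '"I'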
def get_illumina_qual_conversion_table():
"""Illumina 1.3+ format can encode a Phred quality score from 0 to 62
using ASCII 64 to 126 (although in raw read data Phred scores from 0
to 40 only are expected).
"""
offset = 64
conv_table = ['!'] * 256
for x in xrange(0, 62):
conv_table[offset+x] = chr(33 + x)
conv_table[offset+40:] = "I" * (256-(offset+40))
conv_string = ''.join(conv_table)
return maketrans(''.join(map(chr, range(256))), conv_string)
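# Illustrative example (editor's sketch, not part of the original module):
# Illumina 1.3+ stores Phred+64, so 'B' (ASCII 66, quality 2) converts to the
# Sanger character chr(33 + 2) == '#', and qualities of 40 or more are capped
# at 'I':
#   >>> "@Bh".translate(get_illumina_qual_conversion_table())
#   '!#I'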
def get_sanger_qual_conversion_table():
offset = 33
tbl = map(chr, range(256))
tbl[:offset] = "!" * offset
tbl[offset+40:] = "I" * (256-(offset+40))
return maketrans(''.join(map(chr, range(256))), ''.join(tbl))
def get_qual_conversion_func(qual_format):
conv_tables = {SANGER_FORMAT: get_sanger_qual_conversion_table(),
ILLUMINA_FORMAT: get_illumina_qual_conversion_table(),
SOLEXA_FORMAT: get_solexa_qual_conversion_table()}
tbl = conv_tables[qual_format]
return lambda q: q.translate(tbl)
class FASTQRecord:
__slots__ = ("qname", "seq", "qual", "readnum")
def __init__(self, qname, seq, qual, readnum):
self.qname = qname
self.seq = seq
self.qual = qual
self.readnum = readnum
def to_string(self):
return ("@%s/%d\n%s\n+\n%s" %
(self.qname, self.readnum, self.seq, self.qual))
def parse_fastq_record(line_iter,
convert_quals=False,
qual_format=SANGER_FORMAT):
qual_func = get_qual_conversion_func(qual_format)
try:
qname = line_iter.next().rstrip()[1:]
readnum = int(qname[-1])
qname = qname[:-2]
seq = line_iter.next().rstrip()
line_iter.next()
qual = line_iter.next().rstrip()
if convert_quals:
qual = qual_func(qual)
yield FASTQRecord(qname, seq, qual, readnum)
while True:
# qname
qname = line_iter.next().rstrip()[1:]
readnum = int(qname[-1])
qname = qname[:-2]
# seq
seq = line_iter.next().rstrip()
# qname again (skip)
line_iter.next()
# qual
qual = line_iter.next().rstrip()
if convert_quals:
qual = qual_func(qual)
yield FASTQRecord(qname, seq, qual, readnum)
except StopIteration:
pass
def calc_homology(seq1, seq2, num_mismatches):
smallest_len = min(len(seq1), len(seq2))
mm = 0
i = 0
for i in xrange(smallest_len):
if seq1[i] != seq2[i]:
mm += 1
if mm > num_mismatches:
return i
return i + 1
BASES_PER_LINE = 50
def split_seq(seq, chars_per_line=BASES_PER_LINE):
pos = 0
newseq = []
while pos < len(seq):
if pos + chars_per_line > len(seq):
endpos = len(seq)
else:
endpos = pos + chars_per_line
newseq.append(seq[pos:endpos])
pos = endpos
return '\n'.join(newseq)
|
genome-vendor/chimerascan
|
chimerascan/lib/seq.py
|
Python
|
gpl-3.0
| 5,019
|
[
"Galaxy"
] |
922315f8061e2e2d508c673b656d5bcd96b7b09ed60eefc4cf8344d838f816e8
|
'''
user interface for viewing/editing photon optics layouts
'''
from numpy import sin, cos, pi, sqrt, log, array, random, sign
from numpy.linalg import norm
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
#import matplotlib.animation as animation
from ocelot.optics.elements import *
from ocelot.optics.wave import *
def init_plots(views, geo):
scene = Scene()
scene.views = views
scene.fig = plt.figure()
nviews = len(views)
scene.ax = ['']*nviews
scene.profile_im = {}
for iview in range(nviews):
view_id = nviews*100 + 10 + iview + 1
scene.ax[iview] = scene.fig.add_subplot(view_id, autoscale_on=True)
if views[iview] == 'geometry:x' or views[iview] == 'geometry:y':
projection_name = views[iview].split(':')[1]
plot_geometry(scene.ax[iview], geo, projection_name)
scene.ax[iview].grid()
scene.ax[iview].set_title(projection_name)
if views[iview].startswith('detectors'):
if views[iview].startswith('detectors:'):
id = views[iview].split(':')[1]
for obj in geo():
if obj.__class__ == Detector:
print('adding view for detector: ', obj.id)
scene.ax[iview].set_title('detector:' + id)
scene.profile_im[id] = scene.ax[iview]
#scene.profile_im[id] = scene.ax[iview].imshow(obj.matrix.transpose(), cmap='gist_heat',interpolation='none',extent=[0,1,0,1], vmin=0, vmax=10)
return scene
def plot_geometry(ax, geo, proj='y'):
debug("plotting geometry ", proj)
if proj == 'y': idx = 1
if proj == 'x': idx = 0
for o in geo():
if o.__class__ == Mirror:
ang = - np.arctan2(o.no[idx] , o.no[2]) - pi
#print 'ang=', ang
z1 = o.r[2] - o.size[idx] * sin(ang)
z2 = o.r[2]
z3 = o.r[2] + o.size[idx] * sin(ang)
y1 = -o.size[idx] + o.r[idx] + o.size[idx]*(1-cos(ang))
y2 = o.r[idx]
y3 = o.size[idx] + o.r[idx] - o.size[idx]*(1-cos(ang))
li, = ax.plot([z1,z2,z3], [y1,y2,y3], 'b-', lw=3)
y_bnd = np.linspace(y1,y3, 100)
z_bnd = np.linspace(z1, z3, 100)
for z,y in zip(z_bnd[5::10],y_bnd[5::10]):
tick_size = o.size[2]
ax.plot([z,z-tick_size*np.sign(o.no[2])], [y,y-(y_bnd[5] - y_bnd[0])], 'b-', lw=2)
if o.__class__ == EllipticMirror:
#TODO; replace with generic rotation
ang = - np.arctan2(o.no[idx] , o.no[2]) - pi
#print 'ang=', ang
z1 = o.r[2] - o.size[idx] * sin(ang)
z2 = o.r[2]
z3 = o.r[2] + o.size[idx] * sin(ang)
y1 = -o.size[idx] + o.r[idx] + o.size[idx]*(1-cos(ang))
y2 = o.r[idx]
y3 = o.size[idx] + o.r[idx] - o.size[idx]*(1-cos(ang))
#li, = ax.plot([z1,z2,z3], [y1,y2,y3], color="#aa00ff", lw=3)
phi_max = np.arcsin(o.size[idx]/o.a[0])
#y_bnd = np.linspace(y1,y3, 100)
phi_bnd = np.linspace(-phi_max, phi_max, 100)
z_bnd = np.zeros_like(phi_bnd)
y_bnd = np.zeros_like(phi_bnd)
for i in range( len(phi_bnd) ):
z_bnd[i] = o.r[2] + o.a[0]*sin(phi_bnd[i])
y_bnd[i] = o.r[idx] + o.a[1] - o.a[1]*cos(phi_bnd[i])
#for z,y in zip(z_bnd[5:-10:10],y_bnd[5:-10:10]):
n_step = 2
for i in np.arange(0,len(z_bnd) - n_step ,n_step):
tick_size = o.size[2]
#ax.plot([z,z-tick_size*np.sign(o.no[2])], [y,y-(y_bnd[5] - y_bnd[0])], 'b-', lw=2)
ax.plot([z_bnd[i],z_bnd[i+n_step]], [y_bnd[i],y_bnd[i+n_step]], color="#aa00ff", lw=3)
if o.__class__ == ParabolicMirror:
y_bnd = np.linspace(-o.size[idx], o.size[idx], 100)
z_bnd = o.r[2] - o.a[1] * y_bnd**2
#print y_bnd, z_bnd
li, = ax.plot(z_bnd, y_bnd, 'b-', lw=3)
for z,y in zip(z_bnd[5::10],y_bnd[5::10]):
ax.plot([z,z-1.0*np.sign(o.no[2])], [y,y-(y_bnd[5] - y_bnd[0])], 'b-', lw=2)
if o.__class__ == Lense:
y_bnd = np.linspace(-o.D/2,o.D/2,100)
z_bnd1 = (o.r[2]-o.s1) + (o.s1 / (o.D/2)**2 ) * y_bnd**2
z_bnd2 = (o.r[2]+o.s2) - (o.s2 / (o.D/2)**2 ) * y_bnd**2
li, = ax.plot(z_bnd1, y_bnd, 'r-', lw=3)
li, = ax.plot(z_bnd2, y_bnd, 'r-', lw=3)
if o.__class__ == Aperture:
li, = ax.plot([o.r[2],o.r[2]], [o.r[idx] + o.d[idx],o.r[idx] + o.size[idx]], color='#000000', lw=3)
li, = ax.plot([o.r[2],o.r[2]], [o.r[idx] -o.d[idx],o.r[idx] - o.size[idx]], color='#000000', lw=3)
if o.__class__ == Crystal:
li, = ax.plot([o.r[2],o.r[2]], [o.r[idx] - o.size[idx], o.r[idx] + o.size[idx]], color='#999999', lw=3)
if o.__class__ == Grating:
debug("plotting grating")
#TODO; replace with generic rotation
ang = - np.arctan2(o.no[idx] , o.no[2]) - pi
#print 'ang=', ang
z1 = o.r[2] - o.size[idx] * sin(ang)
z2 = o.r[2]
z3 = o.r[2] + o.size[idx] * sin(ang)
y1 = -o.size[idx] + o.r[idx] + o.size[idx]*(1-cos(ang))
y2 = o.r[idx]
y3 = o.size[idx] + o.r[idx] - o.size[idx]*(1-cos(ang))
li, = ax.plot([z1,z2,z3], [y1,y2,y3], color="#AA3377", lw=3)
y_bnd = np.linspace(y1,y3, 100)
z_bnd = np.linspace(z1, z3, 100)
dy = max(abs(y3-y1), abs(z3-z1)) / 20
dz = dy
for z,y in zip(z_bnd[5::10],y_bnd[5::10]):
ax.plot([z-dz,z,z+dz], [y,y+dy, y], color="#AA3377", lw=2)
zmax = np.max([ x.r[2] + x.size[2] for x in geo])
zmin = np.min([x.r[2] - x.size[2] for x in geo])
ymax = np.max( [x.r[1] + x.size[1] for x in geo])
ymin = np.min( [ x.r[1] - x.size[1] for x in geo])
z_margin = (zmax - zmin)*0.1
y_margin = (ymax - ymin)*0.1
#print zmin, zmax, z_margin, ymin, ymax, y_margin
#ax.set_xlim(zmin-z_margin,zmax+z_margin)
#ax.set_ylim(ymin-y_margin,ymax+y_margin)
def plot_rays(ax, rays, proj='x', alpha=0.4):
for r in rays:
debug('plotting ray!', r.r0[0], r.k[0], r.s[0])
for i in range(len(r.r0)):
debug('-->', r.r0[i], r.k[i], r.s[i])
if proj == 'x':
ax.plot([r.r0[i][2], r.r0[i][2] + r.k[i][2]*r.s[i] ], [r.r0[i][0], r.r0[i][0] + r.k[i][0]*r.s[i] ], color='#006600', lw=1, alpha=alpha )
if proj == 'y':
ax.plot([r.r0[i][2], r.r0[i][2] + r.k[i][2]*r.s[i] ], [r.r0[i][1], r.r0[i][1] + r.k[i][1]*r.s[i] ], color='#006600', lw=1, alpha=alpha )
|
ocelot-collab/ocelot
|
ocelot/gui/optics.py
|
Python
|
gpl-3.0
| 7,089
|
[
"CRYSTAL"
] |
79c8101a32d40431f6bb1487cec5fdc204d2fa43bf1994dda82a8861f698601c
|
#!/usr/bin/env python
"Check documentation and input variables"
from __future__ import division, print_function, absolute_import #unicode_literals,
import sys
import os
import os.path
import glob
import re
def usage():
print("\n Usage: docchk \n ")
def abinit_test_generator():
def test_func(abenv):
"Check documentation and input variables"
top = abenv.apath_of("src")
return main(abenv.home_dir)
return {"test_func" : test_func}
def main(home_dir, verbose=False):
home_dir = os.path.abspath(home_dir)
# construct list of input keywords that appear in chkvars.F90
chkvarsf90 = os.path.join(home_dir, "src/57_iovars/chkvars.F90")
if (os.path.isfile(chkvarsf90)):
varfile = open(chkvarsf90)
else:
print(" \n File ", chkvarsf90," not found! ")
sys.exit(2)
in_block = False
words = []
for line in varfile:
if line.find("admitted variable names") > 0:
in_block = True
if line.find("Extra token") > 0:
in_block = False
if in_block == True and line.find("list_var") > 0:
line_words=(line.split("'")[1]).split()
for i in range(len(line_words)):
words.append(line_words[i])
if not words:
print("Found empty list of words in %s " % chkvarsf90)
print("Perhaps someone changed the format of the file?")
print("Please modify the code in " + __file__)
sys.exit(2)
print( " ============================================================= ")
print( " ABINIT Input variables: Regenerate html from abinit_vars.yml ")
print( " ============================================================= ")
pathdocinputdir = os.path.join(home_dir, "doc/input_variables")
cmd = "cd " + pathdocinputdir + " ; rm -f html_automatically_generated/allvariables.html ; python abi_yml2html.py > abi_yml2html.log"
os.system(cmd)
cmd = "cd " + pathdocinputdir + " ; python abi_check.py > abi_check.log"
os.system(cmd)
pathlogfile = os.path.join(home_dir, "doc/input_variables/abi_yml2html.log")
with open(pathlogfile) as logfile:
for line in logfile:
print(line)
pathcheckfile = os.path.join(home_dir, "doc/input_variables/abi_check.log")
with open(pathcheckfile) as checkfile:
for line in checkfile:
print(line)
print( " ============================================================= ")
print( " ABINIT Input variables: Check in documentation ")
print( " ============================================================= ")
varhtml = glob.glob(os.path.join(home_dir, "doc/input_variables/html_automatically_generated/var*html"))
varallvars = glob.glob(os.path.join(home_dir, "doc/input_variables/html_automatically_generated/allvariables.html"))
ret_code = 0
for iwords in range(len(words)):
deffiles = []
for ivarhtml in range(len(varhtml)):
with open(varhtml[ivarhtml]) as fh: varhtmldata = fh.read()
if words[iwords] in varhtmldata:
deffiles.append(varhtml[ivarhtml])
if len(deffiles) > 0:
if verbose: print("SUCCESS: ",words[iwords]," appears in ",len(deffiles)," var*html files ")
else:
print("FAIL: ",words[iwords]," does not appear in any var*html files ")
ret_code += 1
deffiles = []
for ivarallvars in range(len(varallvars)):
with open(varallvars[ivarallvars]) as fh: varallvarsdata = fh.read()
if words[iwords] in varallvarsdata:
deffiles.append(varallvars[ivarallvars])
if len(deffiles) > 0:
if verbose: print("SUCCESS: ",words[iwords]," appears in ",len(deffiles)," allvariables.html file as well")
else:
print("FAIL: ",words[iwords]," does not appear in the central allvariables.html file ")
ret_code += 1
print( " ============================================================= ")
print( " ABINIT Input variables: Check in test suite ")
print( " ============================================================= ")
for iwords in range(len(words)):
autotest = False
for root, dirs, files in os.walk(os.path.join(home_dir, 'tests')):
if root.find("Input")>0:
for ifiles in range(len(files)):
testfilename = os.path.join(root,files[ifiles])
if not testfilename.endswith(".in"):
#print("Ignoring", testfilename)
continue
try:
with open(testfilename, "rt") as fh:
testfileinput = fh.read()
except Exception as exc:
print("FAIL: exception while opening %s\n%s" % (testfilename, str(exc)))
ret_code += 1
continue
if words[iwords] in testfileinput:
autotest = True
break
if autotest:
break
if autotest:
if verbose: print("SUCCESS: ",words[iwords]," appears in automatic test suite ")
else:
print("FAIL: ",words[iwords]," does not appear in automatic test suite ")
ret_code += 1
varfile.close()
# construct list of key words appearing in anaddb input
invars9f90 = os.path.join(home_dir, "src/77_ddb/m_anaddb_dataset.F90")
if (os.path.isfile(invars9f90)):
varfile = open(invars9f90)
else:
print(" \n File ", invars9f90," not found! ")
sys.exit(2)
# Scan the source and search for the calls to intagm. Parse the arguments
# and extract the name of the variable. The prototype of intagm is:
# call intagm(dprarr,intarr,jdtset,marr,1,string(1:lenstr),'brav',tread,'INT')
re_call = re.compile(r'\s*call\s+intagm\((.+)\)\w*', re.I)
words = []
for line in varfile:
m = re_call.match(line)
if m:
tokens = m.group(1).split(",")
assert len(tokens) == 9
words.append(tokens[-3].replace("'","").replace('"',""))
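# Worked example (added for illustration, not in the original script): for the
# prototype call quoted above,
#   call intagm(dprarr,intarr,jdtset,marr,1,string(1:lenstr),'brav',tread,'INT')
# m.group(1) captures the nine comma-separated arguments, so tokens[-3] is
# "'brav'" and the stripped variable name "brav" is appended to words.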
if not words:
print( "Found empty list of words in file %s" % invars9f90)
print( "Perhaps someone changed the format of the file?")
print( "Please modify the code in " + __file__)
sys.exit(2)
#print(words)
print(" ============================================================= ")
print(" ANADDB Input variables: Check in documentation ")
print(" ============================================================= ")
varhtml = os.path.join(home_dir, "doc/users/anaddb_help.html")
for iwords in range(len(words)):
with open(varhtml) as fh: varhtmldata = fh.read()
if words[iwords] in varhtmldata:
if verbose: print ("SUCCESS: ",words[iwords]," appears in ",varhtml)
else:
print ("FAIL: ",words[iwords]," does not appear ",varhtml)
ret_code += 1
print( " ============================================================= ")
print( " ANADDB Input variables: Check in test suite ")
print( " ============================================================= ")
for iwords in range(len(words)):
autotest = False
for root, dirs, files in os.walk(os.path.join(home_dir, 'tests')):
if root.find("Input")>0:
for ifiles in range(len(files)):
testfilename = os.path.join(root,files[ifiles])
if not testfilename.endswith(".in"):
#print("Ignoring:", testfilename)
continue
try:
with open(testfilename, "rt") as fh:
testfileinput = fh.read()
except Exception as exc:
print("FAIL: Exception while readding %s\n%s" % (testfilename, str(exc)))
ret_code += 1
continue
if words[iwords] in testfileinput:
autotest = True
break
if autotest:
break
if autotest:
if verbose:
print("SUCCESS: ",words[iwords]," appears in automatic test suite ")
else:
print("FAIL: ",words[iwords]," does not appear in automatic test suite ")
ret_code += 1
varfile.close()
return ret_code
if __name__ == "__main__":
if len(sys.argv) == 1:
home_dir = os.path.normpath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
else:
home_dir = sys.argv[1]
exit_status = main(home_dir, verbose=False)
sys.exit(exit_status)
|
jmbeuken/abinit
|
abichecks/scripts/docchk.py
|
Python
|
gpl-3.0
| 8,508
|
[
"ABINIT"
] |
84a63192cf268487011fb5469d7f70a3c8a163367a4f179ac17d4ccc658fdcf0
|
'''
'''
from __future__ import print_function
import shutil
from os.path import dirname, exists, join, realpath, relpath
import os, re, subprocess, sys, time
import versioneer
# provide fallbacks for highlights in case colorama is not installed
try:
import colorama
from colorama import Fore, Style
def bright(text): return "%s%s%s" % (Style.BRIGHT, text, Style.RESET_ALL)
def dim(text): return "%s%s%s" % (Style.DIM, text, Style.RESET_ALL)
def red(text): return "%s%s%s" % (Fore.RED, text, Style.RESET_ALL)
def green(text): return "%s%s%s" % (Fore.GREEN, text, Style.RESET_ALL)
def yellow(text): return "%s%s%s" % (Fore.YELLOW, text, Style.RESET_ALL)
sys.platform == "win32" and colorama.init()
except ImportError:
def bright(text): return text
def dim(text): return text
def red(text) : return text
def green(text) : return text
def yellow(text) : return text
# some functions prompt for user input, handle input vs raw_input (py2 vs py3)
if sys.version_info[0] < 3:
input = raw_input # NOQA
# -----------------------------------------------------------------------------
# Module global variables
# -----------------------------------------------------------------------------
ROOT = dirname(realpath(__file__))
BOKEHJSROOT = join(ROOT, 'bokehjs')
BOKEHJSBUILD = join(BOKEHJSROOT, 'build')
CSS = join(BOKEHJSBUILD, 'css')
JS = join(BOKEHJSBUILD, 'js')
SERVER = join(ROOT, 'bokeh/server')
# -----------------------------------------------------------------------------
# Helpers for command line operations
# -----------------------------------------------------------------------------
def show_bokehjs(bokehjs_action, develop=False):
''' Print a useful report after setuptools output describing where and how
BokehJS is installed.
Args:
bokehjs_action (str) : one of 'built', 'installed', or 'packaged'
how (or if) BokehJS was installed into the python source tree
develop (bool, optional) :
whether the command was for "develop" mode (default: False)
Returns:
None
'''
print()
if develop:
print("Installed Bokeh for DEVELOPMENT:")
else:
print("Installed Bokeh:")
if bokehjs_action in ['built', 'installed']:
print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if bokehjs_action=='built' else bright(yellow("PREVIOUSLY"))))
else:
print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED")))
print()
def show_help(bokehjs_action):
''' Print information about extra Bokeh-specific command line options.
Args:
bokehjs_action (str) : one of 'built', 'installed', or 'packaged'
how (or if) BokehJS was installed into the python source tree
Returns:
None
'''
print()
if bokehjs_action in ['built', 'installed']:
print("Bokeh-specific options available with 'install' or 'develop':")
print()
print(" --build-js build and install a fresh BokehJS")
print(" --install-js install only last previously built BokehJS")
else:
print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'")
print()
print("No extra Bokeh-specific options are available.")
print()
# -----------------------------------------------------------------------------
# Other functions used directly by setup.py
# -----------------------------------------------------------------------------
def build_or_install_bokehjs():
''' Build a new BokehJS (and install it) or install a previously built
BokehJS.
If no options ``--build-js`` or ``--install-js`` are detected, the
user is prompted for what to do.
If ``--existing-js`` is detected, then this setup.py is being run from a
packaged sdist, no action is taken.
Note that ``--build-js`` is only compatible with the following ``setup.py``
commands: install, develop, sdist, egg_info, build
Returns:
str : one of 'built', 'installed', 'packaged'
How (or if) BokehJS was installed into the python source tree
'''
# This happens when building from inside a published, pre-packaged sdist
# The --existing-js option is not otherwise documented
if '--existing-js' in sys.argv:
sys.argv.remove('--existing-js')
return "packaged"
if '--build-js' not in sys.argv and '--install-js' not in sys.argv:
jsbuild = jsbuild_prompt()
elif '--build-js' in sys.argv:
jsbuild = True
sys.argv.remove('--build-js')
# must be "--install-js"
else:
jsbuild = False
sys.argv.remove('--install-js')
jsbuild_ok = ('install', 'develop', 'sdist', 'egg_info', 'build')
if jsbuild and not any(arg in sys.argv for arg in jsbuild_ok):
print("Error: Option '--build-js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.")
sys.exit(1)
if jsbuild:
build_js()
install_js()
return "built"
else:
install_js()
return "installed"
def conda_rendering():
return os.getenv("CONDA_BUILD_STATE" ,"junk") == "RENDER"
def fixup_building_sdist():
''' Check for 'sdist' and ensure we always build BokehJS when packaging
Source distributions do not ship with BokehJS source code, but must ship
with a pre-built BokehJS library. This function modifies ``sys.argv`` as
necessary so that ``--build-js`` IS present, and ``--install-js`` is NOT.
Returns:
None
'''
if "sdist" in sys.argv:
if "--install-js" in sys.argv:
print("Removing '--install-js' incompatible with 'sdist'")
sys.argv.remove('--install-js')
if "--build-js" not in sys.argv:
print("Adding '--build-js' required for 'sdist'")
sys.argv.append('--build-js')
def fixup_for_packaged():
''' If we are installing FROM an sdist, then a pre-built BokehJS is
already installed in the python source tree.
The command line options ``--build-js`` or ``--install-js`` are
removed from ``sys.argv``, with a warning.
Also adds ``--existing-js`` to ``sys.argv`` to signal that BokehJS is
already packaged.
Returns:
None
'''
if exists(join(ROOT, 'PKG-INFO')):
if "--build-js" in sys.argv or "--install-js" in sys.argv:
print(SDIST_BUILD_WARNING)
if "--build-js" in sys.argv:
sys.argv.remove('--build-js')
if "--install-js" in sys.argv:
sys.argv.remove('--install-js')
if "--existing-js" not in sys.argv:
sys.argv.append('--existing-js')
# Horrible hack: workaround to allow creation of bdist_wheel on pip
# installation. Why, for God's sake, is pip forcing the generation of wheels
# when installing a package?
def get_cmdclass():
''' A ``cmdclass`` that works around a setuptools deficiency.
There is no need to build wheels when installing a package, however some
versions of setuptools seem to mandate this. This is a hacky workaround
that modifies the ``cmdclass`` returned by versioneer so that not having
wheel installed is not a fatal error.
'''
cmdclass = versioneer.get_cmdclass()
try:
from wheel.bdist_wheel import bdist_wheel
except ImportError:
# pip does not require bdist_wheel when wheel is not installed
bdist_wheel = None
if bdist_wheel is not None:
cmdclass["bdist_wheel"] = bdist_wheel
return cmdclass
def get_package_data():
''' All of the "extra" package data files collected by the
``package_files`` and ``package_path`` functions in ``setup.py``.
'''
return { 'bokeh': _PACKAGE_DATA }
def get_version():
''' The version of Bokeh currently checked out
Returns:
str : the version string
'''
return versioneer.get_version()
# -----------------------------------------------------------------------------
# Helpers for operation in the bokehjs dir
# -----------------------------------------------------------------------------
def jsbuild_prompt():
''' Prompt users whether to build a new BokehJS or install an existing one.
Returns:
bool : True, if a new build is requested, False otherwise
'''
print(BOKEHJS_BUILD_PROMPT)
mapping = {"1": True, "2": False}
value = input("Choice? ")
while value not in mapping:
print("Input '%s' not understood. Valid choices: 1, 2\n" % value)
value = input("Choice? ")
return mapping[value]
# -----------------------------------------------------------------------------
# Helpers for operations in the bokehjs dir
# -----------------------------------------------------------------------------
def build_js():
''' Build BokehJS files (CSS, JS, etc) under the ``bokehjs`` source
subdirectory.
Also prints a table of statistics about the generated assets (file sizes,
etc.) or any error messages if the build fails.
Note this function only builds BokehJS assets, it does not install them
into the python source tree.
'''
print("Building BokehJS... ", end="")
sys.stdout.flush()
os.chdir('bokehjs')
if sys.platform != "win32":
cmd = [join('node_modules', '.bin', 'gulp'), 'build']
else:
cmd = [join('node_modules', '.bin', 'gulp.cmd'), 'build']
t0 = time.time()
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
print(BUILD_EXEC_FAIL_MSG % (cmd, e))
sys.exit(1)
finally:
os.chdir('..')
result = proc.wait()
t1 = time.time()
if result != 0:
indented_msg = ""
outmsg = proc.stdout.read().decode('ascii', errors='ignore')
outmsg = "\n".join([" " + x for x in outmsg.split("\n")])
errmsg = proc.stderr.read().decode('ascii', errors='ignore')
errmsg = "\n".join([" " + x for x in errmsg.split("\n")])
print(BUILD_FAIL_MSG % (red(outmsg), red(errmsg)))
sys.exit(1)
indented_msg = ""
msg = proc.stdout.read().decode('ascii', errors='ignore')
pat = re.compile(r"(\[.*\]) (.*)", re.DOTALL)
for line in msg.strip().split("\n"):
m = pat.match(line)
if not m: continue # skip generate.py output lines
stamp, txt = m.groups()
indented_msg += " " + dim(green(stamp)) + " " + dim(txt) + "\n"
msg = "\n".join([" " + x for x in msg.split("\n")])
print(BUILD_SUCCESS_MSG % indented_msg)
print("Build time: %s" % bright(yellow("%0.1f seconds" % (t1-t0))))
print()
print("Build artifact sizes:")
try:
def size(*path):
return os.stat(join("bokehjs", "build", *path)).st_size / 2**10
print(" - bokeh.js : %6.1f KB" % size("js", "bokeh.js"))
print(" - bokeh.css : %6.1f KB" % size("css", "bokeh.css"))
print(" - bokeh.min.js : %6.1f KB" % size("js", "bokeh.min.js"))
print(" - bokeh.min.css : %6.1f KB" % size("css", "bokeh.min.css"))
print(" - bokeh-widgets.js : %6.1f KB" % size("js", "bokeh-widgets.js"))
print(" - bokeh-widgets.css : %6.1f KB" % size("css", "bokeh-widgets.css"))
print(" - bokeh-widgets.min.js : %6.1f KB" % size("js", "bokeh-widgets.min.js"))
print(" - bokeh-widgets.min.css : %6.1f KB" % size("css", "bokeh-widgets.min.css"))
print(" - bokeh-tables.js : %6.1f KB" % size("js", "bokeh-tables.js"))
print(" - bokeh-tables.css : %6.1f KB" % size("css", "bokeh-tables.css"))
print(" - bokeh-tables.min.js : %6.1f KB" % size("js", "bokeh-tables.min.js"))
print(" - bokeh-tables.min.css : %6.1f KB" % size("css", "bokeh-tables.min.css"))
print(" - bokeh-api.js : %6.1f KB" % size("js", "bokeh-api.js"))
print(" - bokeh-api.min.js : %6.1f KB" % size("js", "bokeh-api.min.js"))
except Exception as e:
print(BUILD_SIZE_FAIL_MSG % e)
sys.exit(1)
def install_js():
''' Copy built BokehJS files into the Python source tree.
Returns:
None
'''
target_jsdir = join(SERVER, 'static', 'js')
target_cssdir = join(SERVER, 'static', 'css')
STATIC_ASSETS = [
join(JS, 'bokeh.js'),
join(JS, 'bokeh.min.js'),
join(CSS, 'bokeh.css'),
join(CSS, 'bokeh.min.css'),
]
if not all([exists(a) for a in STATIC_ASSETS]):
print(BOKEHJS_INSTALL_FAIL)
sys.exit(1)
if exists(target_jsdir):
shutil.rmtree(target_jsdir)
shutil.copytree(JS, target_jsdir)
if exists(target_cssdir):
shutil.rmtree(target_cssdir)
shutil.copytree(CSS, target_cssdir)
# -----------------------------------------------------------------------------
# Helpers for collecting package data
# -----------------------------------------------------------------------------
_PACKAGE_DATA = []
def package_files(*paths):
'''
'''
_PACKAGE_DATA.extend(paths)
def package_path(path, filters=()):
'''
'''
if not os.path.exists(path):
raise RuntimeError("packaging non-existent path: %s" % path)
elif os.path.isfile(path):
_PACKAGE_DATA.append(relpath(path, 'bokeh'))
else:
for path, dirs, files in os.walk(path):
path = relpath(path, 'bokeh')
for f in files:
if not filters or f.endswith(filters):
_PACKAGE_DATA.append(join(path, f))
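# Minimal sketch (hypothetical paths; not taken from Bokeh's actual setup.py) of
# how the collectors above are meant to be used before handing the result to
# setuptools:
#
#   package_files('LICENSE.txt', 'themes/default.yaml')
#   package_path(join(SERVER, 'static'))
#   package_path(join(ROOT, 'bokeh', 'sampledata'), ('.csv', '.json'))
#   setup(..., package_data=get_package_data(), cmdclass=get_cmdclass())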
# -----------------------------------------------------------------------------
# Status and error message strings
# -----------------------------------------------------------------------------
BOKEHJS_BUILD_PROMPT = """
Bokeh includes a JavaScript library (BokehJS) that has its own
build process. How would you like to handle BokehJS:
1) build and install fresh BokehJS
2) install last built BokehJS from bokeh/bokehjs/build
"""
BOKEHJS_INSTALL_FAIL = """
ERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.
Please build BokehJS by running setup.py with the `--build-js` option.
Dev Guide: http://bokeh.pydata.org/docs/dev_guide.html#bokehjs.
"""
BUILD_EXEC_FAIL_MSG = bright(red("Failed.")) + """
ERROR: subprocess.Popen(%r) failed to execute:
%s
Have you run `npm install` from the bokehjs subdirectory?
For more information, see the Dev Guide:
http://bokeh.pydata.org/en/latest/docs/dev_guide.html
"""
BUILD_FAIL_MSG = bright(red("Failed.")) + """
ERROR: 'gulp build' returned the following
---- on stdout:
%s
---- on stderr:
%s
"""
BUILD_SIZE_FAIL_MSG = """
ERROR: could not determine sizes:
%s
"""
BUILD_SUCCESS_MSG = bright(green("Success!")) + """
Build output:
%s"""
SDIST_BUILD_WARNING = """
Source distribution (sdist) packages come with PRE-BUILT BokehJS files.
Building/installing from the bokehjs source directory of sdist packages is
disabled, and the options --build-js and --install-js will be IGNORED.
To build or develop BokehJS yourself, you must clone the full Bokeh GitHub
repository from https://github.com/bokeh/bokeh
"""
|
Ziqi-Li/bknqgis
|
bokeh/_setup_support.py
|
Python
|
gpl-2.0
| 15,276
|
[
"GULP"
] |
85a11e7e94e51856b150f829b5ead504ce100e8bdf20b24af701e4d7bf9168f3
|
########################################################################
# $HeadURL $
# File: ListTestCase.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2011/01/17 08:17:58
########################################################################
""".. module:: ListTestCase
Test cases for DIRAC.Core.Utilities.List module.
"""
__RCSID__ = "$Id $"
##
# @file ListTestCase.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2011/01/17 08:17:58
# @brief Definition of ListTestCase class.
## imports
from DIRAC.Core.Utilities import List
import unittest
########################################################################
class ListTestCase( unittest.TestCase ):
"""py:class ListTestCase
Test case for DIRAC.Core.Utilities.List module.
"""
def testSorted( self ):
""" sortList tests """
# empty list
aList = []
self.assertEqual( List.sortList(aList), [])
# already sorted
aList = [ "a", "b", "c" ]
self.assertEqual( List.sortList(aList), ["a", "b", "c"] )
# unsorted
aList = [ "a", "c", "b" ]
self.assertEqual( List.sortList(aList), ["a", "b", "c"])
# invert
aList = [ "a", "b", "c" ]
self.assertEqual( List.sortList(aList, invert=True), ["c", "b", "a"] )
def testUniqueElements( self ):
""" uniqueElements tests """
# empty list
aList = []
self.assertEqual( List.uniqueElements(aList), [])
# redundant elements
aList = [1, 1, 2, 3]
self.assertEqual( List.uniqueElements( aList ), [1, 2, 3] )
def testAppendUnique( self ):
""" appendUnique tests """
# empty
aList = []
List.appendUnique( aList, None)
self.assertEqual( aList, [None] )
# redundant element
aList = [ 1, 2, 3 ]
List.appendUnique( aList, 1 )
self.assertEqual( aList, [1, 2, 3] )
# all unique
aList = [ 1, 2 ]
List.appendUnique( aList, 3 )
self.assertEqual( aList, [1, 2, 3] )
def testRandomize( self ):
""" randomize tests """
# empty list
aList = []
randList = List.randomize(aList)
self.assertEqual( randList, [] )
# non empty
aList = [1, 2, 3]
randList = List.randomize( aList )
self.assertEqual( len(aList), len(randList) )
for x in aList:
self.assertEqual(x in randList, True)
for x in randList:
self.assertEqual(x in aList, True)
def testPop( self ):
""" pop tests """
# empty list
aList = []
x = List.pop(aList, 1)
self.assertEqual( aList, [] )
self.assertEqual( x, None )
# pop
aList = [ 1, 2, 3 ]
x = List.pop(aList, 2)
self.assertEqual( x, 2 )
self.assertEqual( aList, [1, 3])
def testStringListToString( self ):
""" stringListToString tests """
# empty list
aList = []
aStr = List.stringListToString( aList )
self.assertEqual( aStr, "")
# not string elements (should it raise an exception???)
aList = ["a", 1]
aStr = List.stringListToString( aList )
self.assertEqual( aStr, "'a','1'")
# normal list
aList = ["a", "b", "c"]
aStr = List.stringListToString( aList )
self.assertEqual( aStr, "'a','b','c'")
def testIntListToString( self ):
""" intListToString """
# empty list
aList = [ ]
aStr = List.intListToString( aList )
self.assertEqual( aStr, "")
# int list
aList = [ 1, 2, 3 ]
aStr = List.intListToString( aList )
self.assertEqual( aStr, "1,2,3")
# mixture elements (should it raise an exception???)
aList = ["1", 2, 3]
aStr = List.intListToString( aList )
self.assertEqual( aStr, "1,2,3")
def testRemoveEmptyElements( self ):
""" removeEmptyElements tests """
# empty list
aList = []
self.assertEqual( List.removeEmptyElements(aList), [] )
# None or "" (empty string) in
aList = [ "", None, 1 ]
self.assertEqual( List.removeEmptyElements(aList), [1])
def testFromChar( self ):
""" fromChar tests """
# empty string
aStr = ""
self.assertEqual( List.fromChar(aStr, "-"), [])
# wrong sep (should it raise an exception???)
aStr = "a:b:c"
self.assertEqual( List.fromChar(aStr, "-"), ["a:b:c"])
# normal behaviour
aStr = "a:b:c"
self.assertEqual( List.fromChar( aStr, ":"), ["a", "b", "c"] )
# only sep
aStr = ","
self.assertEqual( List.fromChar(aStr, ","), [] )
# too many separators
aStr = "a,,b,,c,,,"
self.assertEqual( List.fromChar(aStr, ","), ["a", "b", "c"] )
def testBreakListIntoChunks( self ):
""" breakListIntoChunks tests """
# empty list
aList = []
self.assertEqual( List.breakListIntoChunks(aList, 5), [])
# negative number of chunks
try:
List.breakListIntoChunks([], -2)
except Exception, val:
self.assertEqual( isinstance(val, RuntimeError), True )
self.assertEqual( str(val), "chunkSize cannot be less than 1" )
# normal behaviour
aList = range(10)
self.assertEqual( List.breakListIntoChunks(aList, 5), [ [0, 1, 2, 3, 4], [5, 6, 7, 8, 9] ] )
# and once again this time with a rest
aList = range(10)
self.assertEqual( List.breakListIntoChunks(aList, 4), [ [0, 1, 2, 3], [4, 5, 6, 7], [8, 9] ])
# almost empty list, too many chunks
aList = [1]
self.assertEqual( List.breakListIntoChunks(aList, 2), [ [ 1 ] ] )
## test suite execution
if __name__ == "__main__":
TESTLOADER = unittest.TestLoader()
SUITE = TESTLOADER.loadTestsFromTestCase( ListTestCase )
unittest.TextTestRunner(verbosity=3).run( SUITE )
|
Sbalbp/DIRAC
|
Core/Utilities/test/ListTestCase.py
|
Python
|
gpl-3.0
| 5,481
|
[
"DIRAC"
] |
7437d5212a9da7f376c71cb92848872581be7a113e300063bf6e99dcefcc2981
|
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
import numpy as np
from rbf import RbfGaussian
class RbfNetwork(object):
""" A RBF network is an advanced machine learning algorithm that uses a series of RBF functions to perform
regression. It can also perform classification by means of one-of-n encoding.
The long term memory of a RBF network is made up of the widths and centers of the RBF functions, as well as
input and output weighting.
http://en.wikipedia.org/wiki/RBF_network
"""
def __init__(self, input_count, rbf_count, output_count):
""" Create an RBF network with the specified shape.
@param input_count: The input count.
@param rbf_count: The RBF function count.
@param output_count: The output count.
"""
self.input_count = input_count
self.output_count = output_count
# calculate input and output weight counts
# add 1 to output to account for an extra bias node
input_weight_count = input_count * rbf_count
output_weight_count = (rbf_count + 1) * output_count
rbf_params = (input_count + 1) * rbf_count
self.long_term_memory = np.zeros((input_weight_count + output_weight_count + rbf_params), dtype=float)
self.index_input_weights = 0
self.index_output_weights = input_weight_count + rbf_params
self.rbf = {}
# default the Rbf's to gaussian
for i in range(0, rbf_count):
rbf_index = input_weight_count + ((input_count + 1) * i)
self.rbf[i] = RbfGaussian(input_count, self.long_term_memory, rbf_index)
def compute_regression(self, input):
""" Compute the output for the network.
@param input: The input pattern.
@return: The output pattern.
"""
# first, compute the output values of each of the RBFs
# Add in one additional RBF output for bias (always set to one).
rbf_output = [0] * (len(self.rbf) + 1)
# bias
rbf_output[len(rbf_output) - 1] = 1.0
for rbfIndex in range(0, len(self.rbf)):
# weight the input
weighted_input = [0] * len(input)
for inputIndex in range(0, len(input)):
memory_index = self.index_input_weights + (rbfIndex * self.input_count) + inputIndex
weighted_input[inputIndex] = input[inputIndex] * self.long_term_memory[memory_index]
# calculate the rbf
rbf_output[rbfIndex] = self.rbf[rbfIndex].evaluate(weighted_input)
# Second, calculate the output, which is the result of the weighted result of the RBF's.
result = [0] * self.output_count
for outputIndex in range(0, len(result)):
sum_value = 0
for rbfIndex in range(0, len(rbf_output)):
# add 1 to rbf length for bias
memory_index = self.index_output_weights + (outputIndex * (len(self.rbf) + 1)) + rbfIndex
sum_value += rbf_output[rbfIndex] * self.long_term_memory[memory_index]
result[outputIndex] = sum_value
# finally, return the result.
return result
def reset(self):
"""
Reset the network to a random state.
"""
for i in range(0, len(self.long_term_memory)):
self.long_term_memory[i] = np.random.uniform(0, 1)
def compute_classification(self, input):
""" Compute the output and return the index of the output with the largest value. This is the class that
the network recognized.
@param input: The input pattern.
@return:
"""
output = self.compute_regression(input)
return output.index(max(output))
def copy_memory(self, source):
""" Copy the specified vector into the long term memory of the network.
@param source: The source vector.
"""
for i in range(0, len(source)):
self.long_term_memory[i] = source[i]
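# Illustrative smoke test (added for demonstration; not part of the original
# AIFH example code). Builds a tiny network with random weights and evaluates
# one made-up input pattern.
if __name__ == "__main__":
    net = RbfNetwork(2, 3, 1)
    net.reset()
    print(net.compute_regression([0.25, 0.75]))
    print(net.compute_classification([0.25, 0.75]))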
|
trenton3983/Artificial_Intelligence_for_Humans
|
vol3/vol3-python-examples/lib/aifh/rbf_network.py
|
Python
|
apache-2.0
| 4,920
|
[
"Gaussian",
"VisIt"
] |
fe4f72c81566284170429661905f91d8e114899205d0bc69da196385d7f2f021
|