Dataset schema (one row per source file; the `text` column holds the full file contents shown below each record):

| column | dtype | range |
|---|---|---|
| text | string | length 12 – 1.05M |
| repo_name | string | length 5 – 86 |
| path | string | length 4 – 191 |
| language | string | 1 class |
| license | string | 15 classes |
| size | int32 | 12 – 1.05M |
| keyword | list | length 1 – 23 |
| text_hash | string | length 64 |
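A minimal sketch of how rows with this schema might be inspected, assuming the split has been exported to a JSON-lines file (the `rows.jsonl` filename is an assumption, not part of the dataset):

import json

# Load one record per line; each record carries the eight columns listed above.
with open("rows.jsonl", "r", encoding="utf-8") as fh:
    rows = [json.loads(line) for line in fh]

# Example filter: permissively licensed Python files under 50 kB.
small_mit = [r for r in rows if r["license"] == "mit" and r["size"] < 50_000]
for r in small_mit:
    print(r["repo_name"], r["path"], r["keyword"], r["text_hash"][:12])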
"""
@name: Modules/Core/Mqtt/_test/test_mqtt_client.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2015-2019 by D. Brian Kimmel
@license: MIT License
@note: Created on Jun 5, 2015
@Summary:
Passed all 7 tests - DBK - 2019-08-15
"""
__updated__ = '2019-11-23'
# Import system type stuff
from twisted.trial import unittest
from twisted.internet import reactor
# Import PyMh files and modules.
from _test.testing_mixin import SetupPyHouseObj
from Modules.Core.Utilities import json_tools
from Modules.Core.Mqtt.mqtt import _make_message, MqttBrokerInformation
from Modules.House.Lighting.controllers import ControllerInformation
from Modules.House.house_data import LocationInformation
from Modules.House.Lighting.lighting import ScheduleLightingInformation
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
BROKERv4 = 'iot.eclipse.org' # Sandbox Mosquitto broker
BROKER_TLS = '192.168.1.10'
PORT = 1883
PORT_TLS = 8883
SUBSCRIBE = 'pyhouse/#'
class SetupMixin(object):
"""
"""
def setUp(self):
self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj()
self.m_broker = MqttBrokerInformation()
def jsonPair(self, p_json, p_key):
""" Extract key, value from json
"""
l_json = json_tools.decode_json_unicode(p_json)
try:
l_val = l_json[p_key]
except (KeyError, ValueError) as e_err:
l_val = 'ERRor on JsonPair for key "{}" {} {}'.format(p_key, e_err, l_json)
print(l_val)
return l_val
class A0(unittest.TestCase):
def test_00_Print(self):
_x = PrettyFormatAny.form('_test', 'title', 190) # so it is defined when printing is cleaned up.
print('Id: test_mqtt_client')
class C1_TcpConnect(SetupMixin, unittest.TestCase):
def setUp(self):
SetupMixin.setUp(self)
self.m_pyhouse_obj._Twisted.Reactor = reactor
# twisted.internet.base.DelayedCall.debug = True
self.m_broker.BrokerAddress = BROKERv4
self.m_broker.Host.Port = PORT
# self.m_broker.Name = TESTING_BROKER_NAME_1
def test_01_Broker(self):
""" Be sure that the XML contains the right stuff.
"""
self.m_pyhouse_obj.Core.Mqtt.Brokers = {}
self.m_pyhouse_obj.Core.Mqtt.Brokers[0] = self.m_broker
# print(PrettyFormatAny.form(self.m_pyhouse_obj.Core.Mqtt.Brokers, 'B1-01-A - Broker', 80))
# self.assertEqual(self.m_broker.Name, TESTING_BROKER_NAME_1)
class C2_ConnectTLS(SetupMixin, unittest.TestCase):
def setUp(self):
SetupMixin.setUp(self)
self.m_pyhouse_obj._Twisted.Reactor = reactor
# twisted.internet.base.DelayedCall.debug = True
self.m_broker.BrokerAddress = BROKER_TLS
self.m_broker.Host.Port = PORT_TLS
self.m_broker.Active = True
self.m_broker.Access.Name = 'pyhouse'
self.m_broker.Access.Password = 'ChangeMe'
self.m_broker.Name = 'ClientTest'
def test_01_Broker(self):
""" Be sure that the XML contains the right stuff.
"""
self.m_pyhouse_obj.Core.Mqtt.Brokers = {}
self.m_pyhouse_obj.Core.Mqtt.Brokers[0] = self.m_broker
# print(PrettyFormatAny.form(self.m_pyhouse_obj.Core.Mqtt.Brokers, 'B2-01-A - Broker', 80))
self.assertEqual(self.m_broker.Name, 'ClientTest')
class D1_Tools(SetupMixin, unittest.TestCase):
def setUp(self):
SetupMixin.setUp(self)
def test_02_Obj(self):
""" Be sure that the XML contains the right stuff.
"""
l_obj = LocationInformation()
l_obj.Street = '123 Test Street'
l_obj.City = 'Beverly Hills'
l_obj.State = 'Confusion'
_l_json = json_tools.encode_json(l_obj)
# print(PrettyFormatAny.form(l_json, 'Json', 80))
class C2_Publish(SetupMixin, unittest.TestCase):
""" Test the publish routine.
"""
def setUp(self):
SetupMixin.setUp(self)
self.m_pyhouse_obj.Core.Mqtt.Prefix = "pyhouse/test_house/"
# twisted.internet.base.DelayedCall.debug = True
self.m_broker.BrokerAddress = BROKERv4
self.m_broker.Host.Port = PORT
self.m_broker.Active = True
self.m_broker.Name = 'ClientTest'
def test_02_Message(self):
""" No payload (not too useful)
"""
l_message = _make_message(self.m_pyhouse_obj)
# print(PrettyFormatAny.form(l_message, 'C2-02-A - Bare Message', 80))
self.assertEqual(self.jsonPair(l_message, 'Sender'), self.m_pyhouse_obj.Computer.Name)
self.assertSubstring('DateTime', l_message)
def test_03_MessageObj(self):
""" Add an object.
"""
l_data = ScheduleLightingInformation()
l_data.Name = 'Mqtt Controller Object'
l_data.RoomName = 'Living Room'
l_data.Comment = 'The formal Living Room.'
l_message = _make_message(self.m_pyhouse_obj, l_data)
# print(PrettyFormatAny.form(l_message, 'C2-03-A - Message', 80))
self.assertEqual(self.jsonPair(l_message, 'Sender'), self.m_pyhouse_obj.Computer.Name)
self.assertSubstring('DateTime', l_message)
self.assertEqual(self.jsonPair(l_message, 'Name'), l_data.Name)
def test_04_MessageObj(self):
""" Add an object.
"""
l_data = ControllerInformation()
l_data.Name = 'Mqtt Schedule Object'
l_data.LightName = 'Test Light'
l_data.RoomName = 'Living Room'
l_data.Comment = 'The formal Living Room.'
l_message = _make_message(self.m_pyhouse_obj, l_data)
# print(PrettyFormatAny.form(l_message, 'C2-04-A - Message', 80))
self.assertEqual(self.jsonPair(l_message, 'Sender'), self.m_pyhouse_obj.Computer.Name)
self.assertSubstring('DateTime', l_message)
self.assertEqual(self.jsonPair(l_message, 'Name'), l_data.Name)
# ## END DBK
| repo_name: DBrianKimmel/PyHouse | path: Project/src/Modules/Core/Mqtt/_test/test_mqtt_client.py | language: Python | license: mit | size: 5,911 | keyword: ["Brian"] | text_hash: 91b735bc755c9ee0a158d57eee83e6a536d890e491e25ad2e651f69c00574d99 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from vector import vectors
# Does not handle pawn moves (en passant, double advance, promotion) or castling
class Movement:
def __init__(self):
self.vectors = []
self.is_sliding = False
rook_movement = Movement()
rook_movement.vectors.append(vectors['N'])
rook_movement.vectors.append(vectors['E'])
rook_movement.vectors.append(vectors['S'])
rook_movement.vectors.append(vectors['W'])
rook_movement.is_sliding = True
bishop_movement = Movement()
bishop_movement.vectors.append(vectors['NE'])
bishop_movement.vectors.append(vectors['NW'])
bishop_movement.vectors.append(vectors['SW'])
bishop_movement.vectors.append(vectors['SE'])
bishop_movement.is_sliding = True
octopus = rook_movement.vectors + bishop_movement.vectors
queen_movement = Movement()
queen_movement.vectors = octopus
queen_movement.is_sliding = True
king_movement = Movement()
king_movement.vectors = octopus
king_movement.is_sliding = False
knight_movement = Movement()
knight_movement.vectors.append(vectors['NNE'])
knight_movement.vectors.append(vectors['NNW'])
knight_movement.vectors.append(vectors['SSE'])
knight_movement.vectors.append(vectors['SSW'])
knight_movement.vectors.append(vectors['ENE'])
knight_movement.vectors.append(vectors['ESE'])
knight_movement.vectors.append(vectors['WNW'])
knight_movement.vectors.append(vectors['WSW'])
knight_movement.is_sliding = False
movements = dict()
movements['k'] = king_movement
movements['K'] = king_movement
movements['q'] = queen_movement
movements['Q'] = queen_movement
movements['r'] = rook_movement
movements['R'] = rook_movement
movements['b'] = bishop_movement
movements['B'] = bishop_movement
movements['n'] = knight_movement
movements['N'] = knight_movement
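# Illustrative sketch (not part of the original module): assuming the imported
# `vectors` dict maps compass names to (file_delta, rank_delta) board offsets,
# a move generator could consume a Movement like this -- step once for
# non-sliding pieces, and repeat the step for sliding pieces until the edge
# of the board is reached.
def destinations(file_idx, rank_idx, movement):
    squares = []
    for dx, dy in movement.vectors:
        x, y = file_idx + dx, rank_idx + dy
        while 0 <= x < 8 and 0 <= y < 8:
            squares.append((x, y))
            if not movement.is_sliding:
                break
            x, y = x + dx, y + dy
    return squares
# For example, destinations(0, 0, movements['N']) would list the two knight
# moves available from a corner square.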
| repo_name: victor-rene/chess-intuition | path: chess/movement.py | language: Python | license: mit | size: 1,770 | keyword: ["Octopus"] | text_hash: 5b536b6ad4b7bf6afe537ccafbbe7266142dd86f23b9b9cb4b5f03a3c83c2f73 |
from __future__ import division, absolute_import, print_function
import os
import sys
import types
import re
import warnings
from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
from numpy.core import ndarray, ufunc, asarray
__all__ = [
'issubclass_', 'issubsctype', 'issubdtype', 'deprecate',
'deprecate_with_doc', 'get_include', 'info', 'source', 'who',
'lookfor', 'byte_bounds', 'safe_eval'
]
def get_include():
"""
Return the directory that contains the NumPy \\*.h header files.
Extension modules that need to compile against NumPy should use this
function to locate the appropriate include directory.
Notes
-----
When using ``distutils``, for example in ``setup.py``.
::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_include()])
...
"""
import numpy
if numpy.show_config is None:
# running from numpy source directory
d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include')
else:
# using installed numpy core headers
import numpy.core as core
d = os.path.join(os.path.dirname(core.__file__), 'include')
return d
def _set_function_name(func, name):
func.__name__ = name
return func
class _Deprecate(object):
"""
Decorator class to deprecate old functions.
Refer to `deprecate` for details.
See Also
--------
deprecate
"""
def __init__(self, old_name=None, new_name=None, message=None):
self.old_name = old_name
self.new_name = new_name
self.message = message
def __call__(self, func, *args, **kwargs):
"""
Decorator call. Refer to ``decorate``.
"""
old_name = self.old_name
new_name = self.new_name
message = self.message
import warnings
if old_name is None:
try:
old_name = func.__name__
except AttributeError:
old_name = func.__name__
if new_name is None:
depdoc = "`%s` is deprecated!" % old_name
else:
depdoc = "`%s` is deprecated, use `%s` instead!" % \
(old_name, new_name)
if message is not None:
depdoc += "\n" + message
def newfunc(*args,**kwds):
"""`arrayrange` is deprecated, use `arange` instead!"""
warnings.warn(depdoc, DeprecationWarning)
return func(*args, **kwds)
newfunc = _set_function_name(newfunc, old_name)
doc = func.__doc__
if doc is None:
doc = depdoc
else:
doc = '\n\n'.join([depdoc, doc])
newfunc.__doc__ = doc
try:
d = func.__dict__
except AttributeError:
pass
else:
newfunc.__dict__.update(d)
return newfunc
def deprecate(*args, **kwargs):
"""
Issues a DeprecationWarning, adds warning to `old_name`'s
docstring, rebinds ``old_name.__name__`` and returns the new
function object.
This function may also be used as a decorator.
Parameters
----------
func : function
The function to be deprecated.
old_name : str, optional
The name of the function to be deprecated. Default is None, in
which case the name of `func` is used.
new_name : str, optional
The new name for the function. Default is None, in which case the
deprecation message is that `old_name` is deprecated. If given, the
deprecation message is that `old_name` is deprecated and `new_name`
should be used instead.
message : str, optional
Additional explanation of the deprecation. Displayed in the
docstring after the warning.
Returns
-------
old_func : function
The deprecated function.
Examples
--------
Note that ``olduint`` returns a value after printing Deprecation
Warning:
>>> olduint = np.deprecate(np.uint)
>>> olduint(6)
/usr/lib/python2.5/site-packages/numpy/lib/utils.py:114:
DeprecationWarning: uint32 is deprecated
warnings.warn(str1, DeprecationWarning)
6
"""
# Deprecate may be run as a function or as a decorator
# If run as a function, we initialise the decorator class
# and execute its __call__ method.
if args:
fn = args[0]
args = args[1:]
# backward compatibility -- can be removed
# after next release
if 'newname' in kwargs:
kwargs['new_name'] = kwargs.pop('newname')
if 'oldname' in kwargs:
kwargs['old_name'] = kwargs.pop('oldname')
return _Deprecate(*args, **kwargs)(fn)
else:
return _Deprecate(*args, **kwargs)
deprecate_with_doc = lambda msg: _Deprecate(message=msg)
#--------------------------------------------
# Determine if two arrays can share memory
#--------------------------------------------
def byte_bounds(a):
"""
Returns pointers to the end-points of an array.
Parameters
----------
a : ndarray
Input array. It must conform to the Python-side of the array
interface.
Returns
-------
(low, high) : tuple of 2 integers
The first integer is the first byte of the array, the second
integer is just past the last byte of the array. If `a` is not
contiguous it will not use every byte between the (`low`, `high`)
values.
Examples
--------
>>> I = np.eye(2, dtype='f'); I.dtype
dtype('float32')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
>>> I = np.eye(2, dtype='G'); I.dtype
dtype('complex192')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
"""
ai = a.__array_interface__
a_data = ai['data'][0]
astrides = ai['strides']
ashape = ai['shape']
bytes_a = asarray(a).dtype.itemsize
a_low = a_high = a_data
if astrides is None:
# contiguous case
a_high += a.size * bytes_a
else:
for shape, stride in zip(ashape, astrides):
if stride < 0:
a_low += (shape-1)*stride
else:
a_high += (shape-1)*stride
a_high += bytes_a
return a_low, a_high
#-----------------------------------------------------------------------------
# Function for output and information on the variables used.
#-----------------------------------------------------------------------------
def who(vardict=None):
"""
Print the Numpy arrays in the given dictionary.
If there is no dictionary passed in or `vardict` is None then returns
Numpy arrays in the globals() dictionary (all Numpy arrays in the
namespace).
Parameters
----------
vardict : dict, optional
A dictionary possibly containing ndarrays. Default is globals().
Returns
-------
out : None
Returns 'None'.
Notes
-----
Prints out the name, shape, bytes and type of all of the ndarrays
present in `vardict`.
Examples
--------
>>> a = np.arange(10)
>>> b = np.ones(20)
>>> np.who()
Name Shape Bytes Type
===========================================================
a 10 40 int32
b 20 160 float64
Upper bound on total bytes = 200
>>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str',
... 'idx':5}
>>> np.who(d)
Name Shape Bytes Type
===========================================================
y 3 24 float64
x 2 16 float64
Upper bound on total bytes = 40
"""
if vardict is None:
frame = sys._getframe().f_back
vardict = frame.f_globals
sta = []
cache = {}
for name in vardict.keys():
if isinstance(vardict[name], ndarray):
var = vardict[name]
idv = id(var)
if idv in cache.keys():
namestr = name + " (%s)" % cache[idv]
original = 0
else:
cache[idv] = name
namestr = name
original = 1
shapestr = " x ".join(map(str, var.shape))
bytestr = str(var.nbytes)
sta.append([namestr, shapestr, bytestr, var.dtype.name,
original])
maxname = 0
maxshape = 0
maxbyte = 0
totalbytes = 0
for k in range(len(sta)):
val = sta[k]
if maxname < len(val[0]):
maxname = len(val[0])
if maxshape < len(val[1]):
maxshape = len(val[1])
if maxbyte < len(val[2]):
maxbyte = len(val[2])
if val[4]:
totalbytes += int(val[2])
if len(sta) > 0:
sp1 = max(10, maxname)
sp2 = max(10, maxshape)
sp3 = max(10, maxbyte)
prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
print(prval + "\n" + "="*(len(prval)+5) + "\n")
for k in range(len(sta)):
val = sta[k]
print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
val[1], ' '*(sp2-len(val[1])+5),
val[2], ' '*(sp3-len(val[2])+5),
val[3]))
print("\nUpper bound on total bytes = %d" % totalbytes)
return
#-----------------------------------------------------------------------------
# NOTE: pydoc defines a help function which works similarly to this
# except it uses a pager to take over the screen.
# combine name and arguments and split to multiple lines of width
# characters. End lines on a comma and begin argument list indented with
# the rest of the arguments.
def _split_line(name, arguments, width):
firstwidth = len(name)
k = firstwidth
newstr = name
sepstr = ", "
arglist = arguments.split(sepstr)
for argument in arglist:
if k == firstwidth:
addstr = ""
else:
addstr = sepstr
k = k + len(argument) + len(addstr)
if k > width:
k = firstwidth + 1 + len(argument)
newstr = newstr + ",\n" + " "*(firstwidth+2) + argument
else:
newstr = newstr + addstr + argument
return newstr
_namedict = None
_dictlist = None
# Traverse all module directories underneath globals
# to see if something is defined
def _makenamedict(module='numpy'):
module = __import__(module, globals(), locals(), [])
thedict = {module.__name__:module.__dict__}
dictlist = [module.__name__]
totraverse = [module.__dict__]
while True:
if len(totraverse) == 0:
break
thisdict = totraverse.pop(0)
for x in thisdict.keys():
if isinstance(thisdict[x], types.ModuleType):
modname = thisdict[x].__name__
if modname not in dictlist:
moddict = thisdict[x].__dict__
dictlist.append(modname)
totraverse.append(moddict)
thedict[modname] = moddict
return thedict, dictlist
def _info(obj, output=sys.stdout):
"""Provide information about ndarray obj.
Parameters
----------
obj: ndarray
Must be ndarray, not checked.
output:
Where printed output goes.
Notes
-----
Copied over from the numarray module prior to its removal.
Adapted somewhat as only numpy is an option now.
Called by info.
"""
extra = ""
tic = ""
bp = lambda x: x
cls = getattr(obj, '__class__', type(obj))
nm = getattr(cls, '__name__', cls)
strides = obj.strides
endian = obj.dtype.byteorder
print("class: ", nm, file=output)
print("shape: ", obj.shape, file=output)
print("strides: ", strides, file=output)
print("itemsize: ", obj.itemsize, file=output)
print("aligned: ", bp(obj.flags.aligned), file=output)
print("contiguous: ", bp(obj.flags.contiguous), file=output)
print("fortran: ", obj.flags.fortran, file=output)
print(
"data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra),
file=output
)
print("byteorder: ", end=' ', file=output)
if endian in ['|', '=']:
print("%s%s%s" % (tic, sys.byteorder, tic), file=output)
byteswap = False
elif endian == '>':
print("%sbig%s" % (tic, tic), file=output)
byteswap = sys.byteorder != "big"
else:
print("%slittle%s" % (tic, tic), file=output)
byteswap = sys.byteorder != "little"
print("byteswap: ", bp(byteswap), file=output)
print("type: %s" % obj.dtype, file=output)
def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
"""
Get help information for a function, class, or module.
Parameters
----------
object : object or str, optional
Input object or name to get information about. If `object` is a
numpy object, its docstring is given. If it is a string, available
modules are searched for matching objects. If None, information
about `info` itself is returned.
maxwidth : int, optional
Printing width.
output : file like object, optional
File like object that the output is written to, default is
``stdout``. The object has to be opened in 'w' or 'a' mode.
toplevel : str, optional
Start search at this level.
See Also
--------
source, lookfor
Notes
-----
When used interactively with an object, ``np.info(obj)`` is equivalent
to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython
prompt.
Examples
--------
>>> np.info(np.polyval) # doctest: +SKIP
polyval(p, x)
Evaluate the polynomial p at x.
...
When using a string for `object` it is possible to get multiple results.
>>> np.info('fft') # doctest: +SKIP
*** Found in numpy ***
Core FFT routines
...
*** Found in numpy.fft ***
fft(a, n=None, axis=-1)
...
*** Repeat reference found in numpy.fft.fftpack ***
*** Total of 3 references found. ***
"""
global _namedict, _dictlist
# Local import to speed up numpy's import time.
import pydoc
import inspect
if (hasattr(object, '_ppimport_importer') or
hasattr(object, '_ppimport_module')):
object = object._ppimport_module
elif hasattr(object, '_ppimport_attr'):
object = object._ppimport_attr
if object is None:
info(info)
elif isinstance(object, ndarray):
_info(object, output=output)
elif isinstance(object, str):
if _namedict is None:
_namedict, _dictlist = _makenamedict(toplevel)
numfound = 0
objlist = []
for namestr in _dictlist:
try:
obj = _namedict[namestr][object]
if id(obj) in objlist:
print("\n "
"*** Repeat reference found in %s *** " % namestr,
file=output
)
else:
objlist.append(id(obj))
print(" *** Found in %s ***" % namestr, file=output)
info(obj)
print("-"*maxwidth, file=output)
numfound += 1
except KeyError:
pass
if numfound == 0:
print("Help for %s not found." % object, file=output)
else:
print("\n "
"*** Total of %d references found. ***" % numfound,
file=output
)
elif inspect.isfunction(object):
name = object.__name__
arguments = inspect.formatargspec(*inspect.getargspec(object))
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
print(inspect.getdoc(object), file=output)
elif inspect.isclass(object):
name = object.__name__
arguments = "()"
try:
if hasattr(object, '__init__'):
arguments = inspect.formatargspec(
*inspect.getargspec(object.__init__.__func__)
)
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
except:
pass
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
doc1 = inspect.getdoc(object)
if doc1 is None:
if hasattr(object, '__init__'):
print(inspect.getdoc(object.__init__), file=output)
else:
print(inspect.getdoc(object), file=output)
methods = pydoc.allmethods(object)
if methods != []:
print("\n\nMethods:\n", file=output)
for meth in methods:
if meth[0] == '_':
continue
thisobj = getattr(object, meth, None)
if thisobj is not None:
methstr, other = pydoc.splitdoc(
inspect.getdoc(thisobj) or "None"
)
print(" %s -- %s" % (meth, methstr), file=output)
elif (sys.version_info[0] < 3
and isinstance(object, types.InstanceType)):
# check for __call__ method
# types.InstanceType is the type of the instances of oldstyle classes
print("Instance of class: ", object.__class__.__name__, file=output)
print(file=output)
if hasattr(object, '__call__'):
arguments = inspect.formatargspec(
*inspect.getargspec(object.__call__.__func__)
)
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
if hasattr(object, 'name'):
name = "%s" % object.name
else:
name = "<name>"
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
doc = inspect.getdoc(object.__call__)
if doc is not None:
print(inspect.getdoc(object.__call__), file=output)
print(inspect.getdoc(object), file=output)
else:
print(inspect.getdoc(object), file=output)
elif inspect.ismethod(object):
name = object.__name__
arguments = inspect.formatargspec(
*inspect.getargspec(object.__func__)
)
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
print(inspect.getdoc(object), file=output)
elif hasattr(object, '__doc__'):
print(inspect.getdoc(object), file=output)
def source(object, output=sys.stdout):
"""
Print or write to a file the source code for a Numpy object.
The source code is only returned for objects written in Python. Many
functions and classes are defined in C and will therefore not return
useful information.
Parameters
----------
object : numpy object
Input object. This can be any object (function, class, module,
...).
output : file object, optional
If `output` not supplied then source code is printed to screen
(sys.stdout). File object must be created with either write 'w' or
append 'a' modes.
See Also
--------
lookfor, info
Examples
--------
>>> np.source(np.interp) #doctest: +SKIP
In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py
def interp(x, xp, fp, left=None, right=None):
\"\"\".... (full docstring printed)\"\"\"
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
The source code is only returned for objects written in Python.
>>> np.source(np.array) #doctest: +SKIP
Not available for this object.
"""
# Local import to speed up numpy's import time.
import inspect
try:
print("In file: %s\n" % inspect.getsourcefile(object), file=output)
print(inspect.getsource(object), file=output)
except:
print("Not available for this object.", file=output)
# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}
# where kind: "func", "class", "module", "object"
# and index: index in breadth-first namespace traversal
_lookfor_caches = {}
# regexp whose match indicates that the string may contain a function
# signature
_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)
def lookfor(what, module=None, import_modules=True, regenerate=False,
output=None):
"""
Do a keyword search on docstrings.
A list of objects that matched the search is displayed,
sorted by relevance. All given keywords need to be found in the
docstring for it to be returned as a result, but the order does
not matter.
Parameters
----------
what : str
String containing words to look for.
module : str or list, optional
Name of module(s) whose docstrings to go through.
import_modules : bool, optional
Whether to import sub-modules in packages. Default is True.
regenerate : bool, optional
Whether to re-generate the docstring cache. Default is False.
output : file-like, optional
File-like object to write the output to. If omitted, use a pager.
See Also
--------
source, info
Notes
-----
Relevance is determined only roughly, by checking if the keywords occur
in the function name, at the start of a docstring, etc.
Examples
--------
>>> np.lookfor('binary representation')
Search results for 'binary representation'
------------------------------------------
numpy.binary_repr
Return the binary representation of the input number as a string.
numpy.core.setup_common.long_double_representation
Given a binary dump as given by GNU od -b, look for long double
numpy.base_repr
Return a string representation of a number in the given base system.
...
"""
import pydoc
# Cache
cache = _lookfor_generate_cache(module, import_modules, regenerate)
# Search
# XXX: maybe using a real stemming search engine would be better?
found = []
whats = str(what).lower().split()
if not whats:
return
for name, (docstring, kind, index) in cache.items():
if kind in ('module', 'object'):
# don't show modules or objects
continue
ok = True
doc = docstring.lower()
for w in whats:
if w not in doc:
ok = False
break
if ok:
found.append(name)
# Relevance sort
# XXX: this is full Harrison-Stetson heuristics now,
# XXX: it probably could be improved
kind_relevance = {'func': 1000, 'class': 1000,
'module': -1000, 'object': -1000}
def relevance(name, docstr, kind, index):
r = 0
# do the keywords occur within the start of the docstring?
first_doc = "\n".join(docstr.lower().strip().split("\n")[:3])
r += sum([200 for w in whats if w in first_doc])
# do the keywords occur in the function name?
r += sum([30 for w in whats if w in name])
# is the full name long?
r += -len(name) * 5
# is the object of bad type?
r += kind_relevance.get(kind, -1000)
# is the object deep in namespace hierarchy?
r += -name.count('.') * 10
r += max(-index / 100, -100)
return r
def relevance_value(a):
return relevance(a, *cache[a])
found.sort(key=relevance_value)
# Pretty-print
s = "Search results for '%s'" % (' '.join(whats))
help_text = [s, "-"*len(s)]
for name in found[::-1]:
doc, kind, ix = cache[name]
doclines = [line.strip() for line in doc.strip().split("\n")
if line.strip()]
# find a suitable short description
try:
first_doc = doclines[0].strip()
if _function_signature_re.search(first_doc):
first_doc = doclines[1].strip()
except IndexError:
first_doc = ""
help_text.append("%s\n %s" % (name, first_doc))
if not found:
help_text.append("Nothing found.")
# Output
if output is not None:
output.write("\n".join(help_text))
elif len(help_text) > 10:
pager = pydoc.getpager()
pager("\n".join(help_text))
else:
print("\n".join(help_text))
def _lookfor_generate_cache(module, import_modules, regenerate):
"""
Generate docstring cache for given module.
Parameters
----------
module : str, None, module
Module for which to generate docstring cache
import_modules : bool
Whether to import sub-modules in packages.
regenerate : bool
Re-generate the docstring cache
Returns
-------
cache : dict {obj_full_name: (docstring, kind, index), ...}
Docstring cache for the module, either cached one (regenerate=False)
or newly generated.
"""
global _lookfor_caches
# Local import to speed up numpy's import time.
import inspect
if sys.version_info[0] >= 3:
# In Python3 stderr, stdout are text files.
from io import StringIO
else:
from StringIO import StringIO
if module is None:
module = "numpy"
if isinstance(module, str):
try:
__import__(module)
except ImportError:
return {}
module = sys.modules[module]
elif isinstance(module, list) or isinstance(module, tuple):
cache = {}
for mod in module:
cache.update(_lookfor_generate_cache(mod, import_modules,
regenerate))
return cache
if id(module) in _lookfor_caches and not regenerate:
return _lookfor_caches[id(module)]
# walk items and collect docstrings
cache = {}
_lookfor_caches[id(module)] = cache
seen = {}
index = 0
stack = [(module.__name__, module)]
while stack:
name, item = stack.pop(0)
if id(item) in seen:
continue
seen[id(item)] = True
index += 1
kind = "object"
if inspect.ismodule(item):
kind = "module"
try:
_all = item.__all__
except AttributeError:
_all = None
# import sub-packages
if import_modules and hasattr(item, '__path__'):
for pth in item.__path__:
for mod_path in os.listdir(pth):
this_py = os.path.join(pth, mod_path)
init_py = os.path.join(pth, mod_path, '__init__.py')
if (os.path.isfile(this_py) and
mod_path.endswith('.py')):
to_import = mod_path[:-3]
elif os.path.isfile(init_py):
to_import = mod_path
else:
continue
if to_import == '__init__':
continue
try:
# Catch SystemExit, too
base_exc = BaseException
except NameError:
# Python 2.4 doesn't have BaseException
base_exc = Exception
try:
old_stdout = sys.stdout
old_stderr = sys.stderr
try:
sys.stdout = StringIO()
sys.stderr = StringIO()
__import__("%s.%s" % (name, to_import))
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
except base_exc:
continue
for n, v in _getmembers(item):
try:
item_name = getattr(v, '__name__', "%s.%s" % (name, n))
mod_name = getattr(v, '__module__', None)
except NameError:
# ref. SWIG's global cvars
# NameError: Unknown C global variable
item_name = "%s.%s" % (name, n)
mod_name = None
if '.' not in item_name and mod_name:
item_name = "%s.%s" % (mod_name, item_name)
if not item_name.startswith(name + '.'):
# don't crawl "foreign" objects
if isinstance(v, ufunc):
# ... unless they are ufuncs
pass
else:
continue
elif not (inspect.ismodule(v) or _all is None or n in _all):
continue
stack.append(("%s.%s" % (name, n), v))
elif inspect.isclass(item):
kind = "class"
for n, v in _getmembers(item):
stack.append(("%s.%s" % (name, n), v))
elif hasattr(item, "__call__"):
kind = "func"
try:
doc = inspect.getdoc(item)
except NameError:
# ref SWIG's NameError: Unknown C global variable
doc = None
if doc is not None:
cache[name] = (doc, kind, index)
return cache
def _getmembers(item):
import inspect
try:
members = inspect.getmembers(item)
except Exception:
members = [(x, getattr(item, x)) for x in dir(item)
if hasattr(item, x)]
return members
#-----------------------------------------------------------------------------
# The following SafeEval class and company are adapted from Michael Spencer's
# ASPN Python Cookbook recipe:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/364469
# Accordingly it is mostly Copyright 2006 by Michael Spencer.
# The recipe, like most of the other ASPN Python Cookbook recipes was made
# available under the Python license.
# http://www.python.org/license
# It has been modified to:
# * handle unary -/+
# * support True/False/None
# * raise SyntaxError instead of a custom exception.
class SafeEval(object):
"""
Object to evaluate constant string expressions.
This includes strings with lists, dicts and tuples using the abstract
syntax tree created by ``compiler.parse``.
.. deprecated:: 1.10.0
See Also
--------
safe_eval
"""
def __init__(self):
# 2014-10-15, 1.10
warnings.warn("SafeEval is deprecated in 1.10 and will be removed.",
DeprecationWarning)
def visit(self, node):
cls = node.__class__
meth = getattr(self, 'visit' + cls.__name__, self.default)
return meth(node)
def default(self, node):
raise SyntaxError("Unsupported source construct: %s"
% node.__class__)
def visitExpression(self, node):
return self.visit(node.body)
def visitNum(self, node):
return node.n
def visitStr(self, node):
return node.s
def visitBytes(self, node):
return node.s
def visitDict(self, node,**kw):
return dict([(self.visit(k), self.visit(v))
for k, v in zip(node.keys, node.values)])
def visitTuple(self, node):
return tuple([self.visit(i) for i in node.elts])
def visitList(self, node):
return [self.visit(i) for i in node.elts]
def visitUnaryOp(self, node):
import ast
if isinstance(node.op, ast.UAdd):
return +self.visit(node.operand)
elif isinstance(node.op, ast.USub):
return -self.visit(node.operand)
else:
raise SyntaxError("Unknown unary op: %r" % node.op)
def visitName(self, node):
if node.id == 'False':
return False
elif node.id == 'True':
return True
elif node.id == 'None':
return None
else:
raise SyntaxError("Unknown name: %s" % node.id)
def visitNameConstant(self, node):
return node.value
def safe_eval(source):
"""
Protected string evaluation.
Evaluate a string containing a Python literal expression without
allowing the execution of arbitrary non-literal code.
Parameters
----------
source : str
The string to evaluate.
Returns
-------
obj : object
The result of evaluating `source`.
Raises
------
SyntaxError
If the code has invalid Python syntax, or if it contains
non-literal code.
Examples
--------
>>> np.safe_eval('1')
1
>>> np.safe_eval('[1, 2, 3]')
[1, 2, 3]
>>> np.safe_eval('{"foo": ("bar", 10.0)}')
{'foo': ('bar', 10.0)}
>>> np.safe_eval('import os')
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
Traceback (most recent call last):
...
SyntaxError: Unsupported source construct: compiler.ast.CallFunc
"""
# Local import to speed up numpy's import time.
import ast
return ast.literal_eval(source)
#-----------------------------------------------------------------------------
| repo_name: jankoslavic/numpy | path: numpy/lib/utils.py | language: Python | license: bsd-3-clause | size: 34,950 | keyword: ["VisIt"] | text_hash: 4fbf2fe289c300cfbba7a022375e95d6f73ac0f9c1423a03274a7b7edf180e6a |
""" Module to perform a trapezoid model fit to flux time seres data
Author: Christopher J Burke
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as opt
import sys
def phaseData(t, per, to):
"""Phase the data at period per and centered at to
INPUT:
t - time of data
per - period to phase time data period and t should
be in same units
to - epoch of phase zero
OUTPUT:
phi - data phased running from -0.5<phi<=0.5
"""
phi = np.mod(t - to, per) / per
phi = np.where(phi > 0.5, phi - 1.0, phi)
return phi
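# Worked example (added for illustration; values follow directly from the code
# above): phaseData(np.array([0.0, 2.5, 7.5]), 10.0, 0.0) returns
# array([ 0.  ,  0.25, -0.25]) -- times past half a period wrap to negative phase.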
class trp_parameters:
"""Storage class for the parameters of the trapezoid fit algorithms
CONTENTS:
samplen - [Int] subsampling of LC model data
***MUST BE ODD*** No checking this
likehoodmoddisplay - [Int] If debugLevel > =3 display likelihood call
model and residual every iteration mod of
this parameter
cadlen - [Days] Cadence duration
fitregion - [float] Factor of duration around midpoint to actually
fit to data.
"""
def __init__(self):
self.samplen = 15
self.likehoodmoddisplay = 10
self.cadlen = 29.424/60.0/24.0 #Kepler cadence
self.fitregion = 4.0
self.debugLevel = 4
def __str__(self):
for k in self.__dict__:
print k, self.__dict__[k]
return ''
class trp_originalestimates:
"""Storage class for the original parameter estimations
CONTENTS:
period [Days] - Initial orbital period
***By default this is fixed during fitting***
epoch [Days] - Initial epoch
duration [Hours] - Initial duration fitted **In Hours**
depth [ppm] - Initial depth
"""
def __init__(self):
self.period = 1.0
self.epoch = 0.1
self.duration = 3.0
self.depth = 100.0
def __str__(self):
for k in self.__dict__:
print k, self.__dict__[k]
return ''
class trp_planetestimates:
"""Storage class for estimating a planet model based
upon the trapezoid fit solution. See Carter et al. 2008
CONTENTS:
u1 - quadratic limb darkening parameters to use
u2 - ''
period [Days] - Resulting period currently not fit
radiusRatio - from purely geometric depth=(Rp/Rstar)^2
impactParameter - Impact parameter
tauzero - [Days] - transit timescale constant
semiaxisRatio - Semi-major axis to stellar radius ratio
surfaceBright - Limb darkened surface brightness at crossing
impact parameter
equivRadiusRatio - Crude approximation to radius ratio
taking into account limb darkening
that works better than the purely geometric
radius ratio
minDepth [ppm] - minimum depth from model
avgDepth [ppm] - mean depth across transit
epoch - epoch of fit midpoint
bigT [day] - trapezoid model full duration
littleT [day] - trapezoid model ingress/egress duration
depth [ppm] - trapezoid model depth parameter
"""
def __init__(self):
self.u1 = 0.40 # limb darkening for Sun in Kepler passband
self.u2 = 0.27
self.period = 1.0
self.radiusRatio = 0.0
self.impactParameter = 0.5
self.tauzero = 0.1
self.semiaxisRatio = 20.0
self.surfaceBright = 0.5
self.equivRadiusRatio = 0.0
self.minDepth = 0.0
self.epoch = 0.0
self.bigT = 0.0
self.littleT = 0.0
self.depth = 0.0
def __str__(self):
for k in self.__dict__:
print k, self.__dict__[k]
return ''
class trp_ioblk:
"""Define a class that contains all the data needed to perform
trapezoid fits. Numerous functions will use as input this
class. This is purely a storage class
See trp_setup to illustrate how to initialize these
storage classes
CONTENTS:
parm - [class] Storage class trp_parameters for algorithm parameters
origests - [class] Storage class trp_originalestimates for initial
parameter estimates
"""
def __init__(self):
self.parm = trp_parameters()
self.origests = trp_originalestimates()
self.planetests = trp_planetestimates()
self.physval_names = ['']
self.fixed = np.array([0])
self.nparm = 0
self.physval_mins = np.array([0.0])
self.physval_maxs = np.array([0.0])
self.physvals = np.array([0.0])
self.physvalsavs = np.array([0.0])
self.bestphysvals = np.array([0.0])
self.boundedvals = np.array([0.0])
self.boundedvalsavs = np.array([0.0])
self.bestboundedvals = np.array([0.0])
self.model = np.array([0.0])
self.errscl = 1.0
self.chi2min = 0.0
self.minimized = False
self.sampleit = np.array([0.0])
self.fitdata = np.array(0, dtype=np.bool)
self.normlc = np.array([0.0])
self.normes = np.array([0.0])
self.normts = np.array([0.0])
self.normots = np.array([0.0])
self.timezpt = 0.0
def __str__(self):
for k in self.__dict__:
print k, self.__dict__[k]
return ''
def boundedvals(ioblk):
"""Convert parameters to bounded versions that the minimzer will use
INPUT:
ioblk - [class] trp_ioblk class
OUTPUT:
ioblk - [class]
err - [0 ok ; 1 not ok]
"""
err = 0 # Error flag
maxmindelta = ioblk.physval_maxs - ioblk.physval_mins
datamindelta = ioblk.physvals - ioblk.physval_mins
ioblk.boundedvals = -np.log( maxmindelta / datamindelta - 1.0)
if ~np.isfinite(ioblk.boundedvals).all():
print "Bounded Vals Bad"
print ioblk.boundedvals
print ioblk.physvals
err = 1
return ioblk, err
def unboundedvals(ioblk):
"""Convert bounded parameter values that the minimizer uses to physvals
INPUT:
ioblk - [class] trp_ioblk class
OUTPUT:
ioblk - [class]
err - [0 ok ; 1 not ok]
"""
err = 0 # Error flag
maxmindelta = ioblk.physval_maxs - ioblk.physval_mins
ioblk.physvals = ioblk.physval_mins + \
(maxmindelta / (1.0 + np.exp( -ioblk.boundedvals )))
#if np.sum( np.isfinite(ioblk.physvals) ) != np.size(ioblk.boundedvals) :
if ~np.isfinite(ioblk.physvals).all():
print "UnBounded Vals Bad"
print ioblk.boundedvals
print ioblk.physvals
err = 1
return ioblk, err
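# Worked example (added for illustration): with physval_mins = 0.0 and
# physval_maxs = 10.0, a physical value of 5.0 maps to the bounded value
# -log(10/5 - 1) = 0.0, and unboundedvals() inverts it:
# 0.0 + 10.0 / (1 + exp(-0.0)) = 5.0. Values approaching either bound map to
# large |bounded| values, which keeps the minimizer's search inside the box.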
def trapezoid(t, depth, bigT, littleT):
"""Trapezoid shape for model
INPUT:
t - [float] vector of independent values to evaluate
trapezoid model
depth - [float] depth of trapezoid
bigT - [float] full trapezoid duration
littleT - [float] 'ingress/egress' duration
OUTPUT:
output - [float] vector of trapezoid model values
"""
output = np.full_like(t, 1.0)
t = np.abs(t)
output = np.where(t <= bigT/2.0 - littleT/2.0, 1.0 - depth, output)
output = np.where(np.logical_and(t > bigT/2.0 - littleT/2.0, \
t < bigT/2.0 + littleT/2.0), \
1.0 - depth + ((depth/littleT)* \
(t-bigT/2.0 + littleT/2.0)), output)
return output
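# Worked example (added for illustration): trapezoid(t, depth=0.01, bigT=4.0,
# littleT=1.0) is flat-bottomed at 0.99 for |t| <= 1.5, ramps linearly through
# 0.995 at |t| = 2.0, and is back at 1.0 for |t| >= 2.5.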
def trapezoid_model_onemodel(ts, period, epoch, depth, bigT, littleT, subsamplen):
"""Make a trapezoid model at the given input parameters. This routine
generates the ioblk class which is used in the transit model.
You can save time if you want to generate many models by
calling this function once to generate the ioblk and then call
trapezoid_model_raw() to generate the models at other inputs
bypassing some of the setup routines in this function.
INPUT:
ts - Mid cadence time stamps
period - Period of signal ***assumed fixed during model generation**
epoch - Estimated epoch of signal. Must be on same system
as ts
depth [ppm] - Model depth
bigT [hr] - full transit duration in hours
littleT [hr] - ingress time in hours
subsamplen - Subsample each cadence by this factor
OUTPUT:
ioblk - structure class containing model light curve
located at ioblk.modellc
"""
# Instantiate trp_ioblk class and fill in values
ioblk = trp_ioblk()
ioblk.parm.debugLevel = 0
ioblk.parm.samplen = subsamplen
ioblk.normots = ts
ioblk.origests.period = period
ioblk.origests.epoch = epoch
ioblk.origests.depth = depth
ioblk.origests.duration = bigT
# Calculate this from timeSeries
ioblk.parm.cadlen = np.median(np.diff(ts))
ioblk = trp_setup(ioblk)
# update the tratio
ioblk.physvals[3] = littleT / bigT
ioblk, err = boundedvals(ioblk)
ioblk.physvalsavs = ioblk.physvals
ioblk.boundedvalsavs = ioblk.boundedvals
ioblk, err = trapezoid_model(ioblk)
return ioblk
def trapezoid_model_raw(ioblk, epoch, depth, bigT, littleT):
"""If you have a preexisting ioblk from fit or trapezoid_model_onemodel()
You can just call this function to get another model
at a different epoch depth duration and ingress time
****period is not variable at this point call
trapezoid_model_onemodel() instead
INPUT:
ioblk - pre-existing ioblk from fitting or trapezoid_model_onemodel()
epoch - Estimated epoch of signal. Must be on same system
as ts
depth [ppm] - Model depth
bigT [hr] - full transit duration in hours
littleT [hr] - ingress time in hours
OUTPUT:
ioblk - structure class containing model light curve
located at ioblk.modellc
"""
ioblk.physvals[0] = epoch - ioblk.origests.epoch
ioblk.physvals[1] = depth / 1.0e6
ioblk.physvals[2] = bigT / 24.0
ioblk.physvals[3] = littleT / bigT
ioblk, err = boundedvals(ioblk)
ioblk, err = trapezoid_model(ioblk)
return ioblk
def trapezoid_model(ioblk):
"""Generate a subsampled model at the current parameters
INPUT:
ioblk - [class] trp_ioblk class structure
OUTPUT:
ioblk - [class] modified ioblk
err - [0 ok; 1 not ok] Error flag
"""
err = 0
to = ioblk.physvals[0]
depth = ioblk.physvals[1]
bigT = ioblk.physvals[2]
tRatio = ioblk.physvals[3]
littleT = tRatio * bigT
per = ioblk.origests.period
ts = ioblk.normts
phi = phaseData(ts, per, to)
lc = np.ones_like(ioblk.normts)
cadlen = ioblk.parm.cadlen
samplen = ioblk.parm.samplen
# Call trapezoid model for data points without any subsampling needed
idx = np.where(np.logical_and(ioblk.fitdata, ioblk.sampleit == 1))[0]
if idx.size > 0:
ztmp = phi[idx] * per
lctmp = trapezoid(ztmp, depth, bigT, littleT)
lc[idx] = lctmp
# Call trapezoid model for data points that need subsampling
idx = np.where(np.logical_and(ioblk.fitdata, ioblk.sampleit > 1))[0]
if idx.size > 0:
ztmp = phi[idx] * per
deltaXSmall = cadlen / np.float(samplen)
smallBlock = np.linspace(-cadlen/2.0 + deltaXSmall/2.0,
cadlen/2.0 - deltaXSmall/2.0, samplen)
oN = ztmp.size
ztmp_highres = np.tile(ztmp, samplen)
ztmp_highres = np.reshape(ztmp_highres, (samplen, oN))
smallBlock_highres = np.tile(smallBlock, oN)
smallBlock_highres = np.reshape(smallBlock_highres, (oN, samplen))
smallBlock_highres = np.transpose(smallBlock_highres)
ztmp_highres = ztmp_highres + smallBlock_highres
ztmp_highres = ztmp_highres.ravel(order='F')
lctmp_highres = trapezoid(ztmp_highres, depth, bigT, littleT)
nN = ztmp_highres.size
lctmp = lctmp_highres.reshape([oN, nN/oN]).mean(1)
lc[idx] = lctmp
ioblk.modellc = lc
if np.sum(np.isfinite(lc)) != lc.size:
err = 1
return ioblk, err
def trp_setup(ioblk):
"""Setup various data products before minimizing
INPUT:
ioblk - [class] trp_ioblk class structure
OUTPUT:
ioblk - [class] modified ioblk
"""
per = ioblk.origests.period
eph = ioblk.origests.epoch
dur = ioblk.origests.duration
depth = ioblk.origests.depth / 1.0e6
durday = dur / 24.0
phidur = dur / 24.0 / per
# Normalize the time series
ts = ioblk.normots
medianEvent = np.median(np.round((ts - eph)/per))
ioblk.timezpt = eph + (medianEvent * per)
ioblk.normts = ioblk.normots - ioblk.timezpt
# identify in transit data to over sample and fitting region
phi = phaseData(ioblk.normts, per, 0.0)
ioblk.sampleit = np.where(abs(phi) < (phidur * 1.5), ioblk.parm.samplen, 1)
ioblk.fitdata = np.where(abs(phi) < (phidur * ioblk.parm.fitregion),\
True, False)
# always fit less than 0.25 of phase space for stability
# and efficiency reasons
ioblk.fitdata = np.where(abs(phi) > 0.25, False, ioblk.fitdata)
# Set parameters and bounds
ioblk.physval_names = ['To', 'Depth', 'BigT', 'TRatio']
ioblk.physval_mins = np.array([-durday*1.5, 1.0e-6, 0.0, 1.0e-10])
ioblk.physval_maxs = np.array([ durday*1.5, depth*5.0, durday*3.0, 1.0])
ioblk.fixed = np.array([0, 0, 0, 0])
ioblk.physvals = np.array([0.0, depth, durday, 0.2])
ioblk.nparm = np.size(ioblk.fixed)
# Validate trapezoid fit inputs look reasonable
trp_validate(ioblk)
ioblk.modellc = np.full_like(ioblk.normlc, 1.0)
ioblk.chi2min = ioblk.normlc.size * 2000.0
ioblk.likecount = 0
ioblk, err = boundedvals(ioblk)
# physvalsavs and boundedvalsavs are used to store parameters
# that are fixed during the calculation
# ***They must be populated with fixed values before moving forward
ioblk.physvalsavs = ioblk.physvals
ioblk.boundedvalsavs = ioblk.boundedvals
ioblk.bestphysvals = ioblk.physvals
ioblk.bestboundedvals = ioblk.boundedvals
ioblk.minimized = False
return ioblk
def trp_validate(ioblk):
# Check that physvals are within limits
if (np.any(np.greater_equal(ioblk.physvals, ioblk.physval_maxs))):
print 'physvals: {} is greater than physval_maxs: {}'.format( \
ioblk.physvals,ioblk.physval_maxs)
raise ValueError("TrapFit: physvals has value greater than physval_maxs")
if (np.any(np.less_equal(ioblk.physvals, ioblk.physval_mins))):
print 'physvals: {} is less than physval_mins: {}'.format( \
ioblk.physvals,ioblk.physval_mins)
raise ValueError("TrapFit: physvals has value less than physval_mins")
# Check for NaNs in input data series
if (np.any(np.isnan(ioblk.normlc))):
raise ValueError("TrapFit: Input light curve contains NaN")
if (np.any(np.isnan(ioblk.normes))):
raise ValueError("TrapFit: Input uncertainty estimate contains NaN")
if (np.any(np.isnan(ioblk.normots))):
raise ValueError("TrapFit: Input time data contains NaN")
# Check for input data series that has negative flux data should be
# normalized to 1.0
if (np.any(np.less(ioblk.normlc,0.0))):
raise ValueError("TrapFit: Negative Flux in light curve")
def trp_likehood(pars,ioblk):
"""Return a residual time series of data minus model
trp_setup(ioblk) should be called before this function is called
INPUT:
pars - [numpy array] vector of parameter values
ioblk - [class] trp_ioblk class structure
OUTPUT:
residuals - sum of squares of residuals of data - model
ioblk - [class] modified ioblk
"""
ioblk.likecount += 1
# Update parameters into bounded values
idx = np.where(ioblk.fixed == 0)[0]
ioblk.boundedvals[idx] = pars
ioblk.boundedvals = np.where(ioblk.fixed == 1, ioblk.boundedvalsavs,
ioblk.boundedvals)
# Convert to unbounded values
ioblk, err = unboundedvals(ioblk)
# Generate Model
ioblk, err = trapezoid_model(ioblk)
# Calculate residuals
idx = np.where(ioblk.fitdata)[0]
residuals = (ioblk.normlc[idx] - ioblk.modellc[idx])/(ioblk.normes[idx] * ioblk.errscl)
# Return scalar summed residuals
residuals = np.sum(residuals**2)
# Do plotting
if ioblk.parm.debugLevel > 2:
if ioblk.likecount == 1: # Setup figures for first time
ioblk.fighandle = plt.figure(figsize=(3,2),dpi=300,
facecolor='white')
ioblk.axhandle = plt.gca()
ioblk.axhandle.set_position([0.125, 0.125, 0.825, 0.825])
ioblk.axhandle.set_axis_bgcolor('white')
if np.mod(ioblk.likecount, ioblk.parm.likehoodmoddisplay) == 0 \
or ioblk.likecount == 1:
plt.figure(ioblk.fighandle.number)
plt.cla()
period = ioblk.origests.period
tzero = ioblk.physvals[0]
ts = ioblk.normts
phi = phaseData(ts, period, tzero)
plt.plot(phi,ioblk.normlc,'.',markersize=0.6)
plt.plot(phi,ioblk.modellc,'.r',markersize=0.6)
plt.pause(0.0001) # This line forces a draw it seems
# getting matplotlib to plot in a non blocking
# manner has a storied history on the web
# this method may fail in later versions
if ioblk.parm.debugLevel > 3:
raw_input("Press [ENTER]")
return residuals
def trp_iterate_solution(ioblk, nIter):
"""Peform multiple iterations starting from random initial conditions
return the best solution in a chi2 sense among the nIter iterations
"""
bestChi2s = np.zeros(nIter)
bestParameters = np.zeros((ioblk.physvals.size, nIter))
gdFits = np.zeros(nIter, dtype=np.bool)
depth = ioblk.origests.depth / 1.0e6
for i in range(nIter):
ioblk.physvals = ioblk.physval_mins + \
np.random.rand(ioblk.physvals.size) * \
(ioblk.physval_maxs - ioblk.physval_mins)
# Force depth parameter to start at minimum half the depth
if ioblk.physvals[1] < np.abs(depth/2.0):
ioblk.physvals[1] = depth / 2.0
# Replace random starts with parameters values that are fixed
ioblk.physvals = np.where(ioblk.fixed == 1, ioblk.physvalsavs, \
ioblk.physvals)
ioblk, err = boundedvals(ioblk)
freeidx = np.where(ioblk.fixed == 0)[0]
startParameters = ioblk.boundedvals[freeidx]
#usemethod = 'Nelder-Mead'
usemethod = 'Powell'
useoptions = {'xtol': 1e-5, 'ftol': 1e-5, 'maxiter': 2000, 'maxfev': 2000}
#usemethod = 'CG'
#useoptions = {'gtol': 1e-5, 'maxiter': 2000}
allOutput = opt.minimize(trp_likehood, startParameters, args=(ioblk,), \
method=usemethod, options=useoptions)
ioblk.boundedvals[freeidx] = allOutput['x']
ioblk.boundedvals = np.where(ioblk.fixed == 1, ioblk.boundedvalsavs, \
ioblk.boundedvals)
ioblk, err = unboundedvals(ioblk)
chi2min = allOutput['fun']
if ioblk.parm.debugLevel > 0:
strout = "%s %d %s %f" % ("It: ",i," Chi2: ",chi2min)
print strout
print ioblk.physvals
if np.isfinite(ioblk.physvals).all():
gdFits[i] = True
bestChi2s[i] = chi2min
bestParameters[:,i] = ioblk.physvals
# Done with iterations find the best one by chi2min
bestMaskedIdx = np.argmin(bestChi2s[gdFits])
ioblk.chi2min = bestChi2s[gdFits][bestMaskedIdx]
ioblk.bestphysvals = bestParameters[:,gdFits][:,bestMaskedIdx]
ioblk.physvals = ioblk.bestphysvals
ioblk, err = boundedvals(ioblk)
ioblk.bestboundedvals = ioblk.boundedvals
if ioblk.parm.debugLevel > 0:
strout = "%s %f" % ("Overall Best Chi2 Min: ",ioblk.chi2min)
print strout
print ioblk.physvals
ioblk.minimized = True
return ioblk
def trp_estimate_planet(ioblk):
"""Convert the trapezoid fit solution into a crude estimate
of a planet model that is close to trapezoid solution
This fills out values in trp_planetestimates class
"""
if not ioblk.minimized:
strout = "Warning getting planet estimates for non converged \
trapezoid fit. Do not trust results"
print strout
ioblk.planetests.period = ioblk.origests.period
ioblk.planetests.epoch = ioblk.timezpt + ioblk.bestphysvals[0]
ioblk.planetests.bigT = ioblk.bestphysvals[2]
ioblk.planetests.littleT = ioblk.bestphysvals[3] * \
ioblk.planetests.bigT
ioblk.planetests.depth = ioblk.bestphysvals[1]
# call likehood to get best transit model
idx = np.where(ioblk.fixed == 0)[0]
resids = trp_likehood(ioblk.bestboundedvals[idx], ioblk)
trapmodlc = ioblk.modellc
ioblk.planetests.minDepth = (1.0 - trapmodlc.min()) * 1.0e6
ioblk.planetests.radiusRatio = np.sqrt(ioblk.planetests.minDepth / 1.0e6)
ioblk.planetests.impactParameter = np.sqrt(1.0 - \
np.amin([ioblk.planetests.radiusRatio * \
ioblk.planetests.bigT/ioblk.planetests.littleT, 1.0]))
ioblk.planetests.tauzero = np.sqrt(ioblk.planetests.bigT * \
ioblk.planetests.littleT / 4.0 / \
ioblk.planetests.radiusRatio)
ioblk.planetests.semiaxisRatio = ioblk.planetests.period / 2.0 / \
np.pi / ioblk.planetests.tauzero
mu = np.sqrt(1.0 - ioblk.planetests.impactParameter**2)
ioblk.planetests.surfaceBright = 1.0 - ioblk.planetests.u1*(1.0-mu) - \
ioblk.planetests.u2*(1.0-mu)**2
ioblk.planetests.equivRadiusRatio = ioblk.planetests.radiusRatio / \
np.sqrt(ioblk.planetests.surfaceBright)
return ioblk
def trapezoid_fit(timeSeries, dataSeries, errorSeries, \
signalPeriod, signalEpoch, signalDuration, signalDepth, \
fitTrialN=13, fitRegion=4.0, errorScale=1.0, debugLevel=0,
sampleN=15, showFitInterval=30):
"""Perform a trapezoid fit to a normalized flux time series
Assumes all data has the same cadence duration
Period is fixed during the trapezoid fit
AUTHOR: Christopher J Burke
INPUT:
timeSeries - Mid cadence time stamps
dataSeries - Normalized time series
errorSeries - Error time series
signalPeriod - Period of signal ***assumed fixed during model fit**
signalEpoch - Estimated epoch of signal. Must be on same system
as timeSeries
signalDuration [hr] - Estimated signal duration ***In hours**
signalDepth [ppm] - Estimated signal depth
fitTrialN - How many trial fits to perform starting at random
initial locations. Increase this if you find the
minimization is returning local minima
fitRegion - Fit data within fitRegion*signalDuration of signalEpoch
errorScale - Default 1.0 - Scale the errorbars by this factor
debugLevel - 0 Show nothing; 1-Show some text about iterations
2 Show some more text; 3 - Show graphical fit in
progress; 4 - pause for each graphical fit
sampleN - Subsample each cadence by this factor
showFitInterval - If debugLevel >=3 the show every showFitInterval
function evaluation
OUTPUT:
ioblk - An instance of trp_ioblk which is a class used to store
all information pertaining to fit results
"""
# Instantiate trp_ioblk class and fill in values
ioblk = trp_ioblk()
ioblk.parm.debugLevel = debugLevel
ioblk.parm.samplen = sampleN
ioblk.parm.likehoodmoddisplay = showFitInterval
ioblk.fitregion = fitRegion
ioblk.normlc = dataSeries
ioblk.normes = errorSeries
ioblk.errscl = errorScale
ioblk.normots = timeSeries
ioblk.origests.period = signalPeriod
ioblk.origests.epoch = signalEpoch
ioblk.origests.duration = signalDuration # input duration is hours
ioblk.origests.depth = signalDepth
# Calculate this from timeSeries
ioblk.parm.cadlen = np.median(np.diff(timeSeries))
# setup some more variables
ioblk = trp_setup(ioblk)
# Find solution by trying random initial conditions
ioblk = trp_iterate_solution(ioblk,fitTrialN)
# Convert the trapezoid fit solution into a pseudo planet model parameters
ioblk = trp_estimate_planet(ioblk)
# Raise an exception if final model is consistent with flat
if (np.all(np.abs(ioblk.modellc - ioblk.modellc[0]) \
< (10.0 * sys.float_info.epsilon))):
raise ValueError("TrapFit: Model light curve is flat!")
# Check for NaNs in output model
if (np.any(np.isnan(ioblk.modellc))):
raise ValueError("TrapFit: Output Model light curve contains NaN")
return ioblk
# Run the test of a trapezoid model fit in gaussian noise
if __name__ == "__main__":
# Make some fake data
dataSpan = 80.0 # in Days
exposureLength = 1.0/48.0 # in Days simulating 48 cadences per day
nData = dataSpan / exposureLength
noiseLevel = 40.0 # noise per observation in ppm
signalDepth = 300.0 # signal depth in ppm
signalDuration = 5.0 / 24.0 # in Days
signalDurationHours = signalDuration * 24.0
signalPeriod = 10.4203 # in Days
signalEpoch = 5.1 # in Days
timeSeries = np.linspace(0.0, dataSpan, nData);
dataSeries = 1.0 + np.random.randn(nData) / 1.0e6 * noiseLevel
errorSeries = np.full_like(dataSeries,noiseLevel/1.0e6)
# Instantiate trp_ioblk class and fill in values
ioblk = trp_ioblk()
ioblk.parm.samplen = 15
ioblk.parm.cadlen = exposureLength
ioblk.fitregion = 4.0
ioblk.normlc = dataSeries
ioblk.normes = errorSeries
ioblk.normots = timeSeries
ioblk.origests.period = signalPeriod
ioblk.origests.epoch = signalEpoch
ioblk.origests.duration = signalDurationHours # input duration is hours
ioblk.origests.depth = signalDepth
# setup some more variables
ioblk = trp_setup(ioblk)
ioblk.physvals = np.array([0.0, signalDepth/1.0e6, signalDuration, 0.1])
# Make a model trapezoid light curve
ioblk, err = trapezoid_model(ioblk)
#Phase data
phasedSeries = phaseData(timeSeries, signalPeriod, signalEpoch)
# Insert signal
phaseDuration = signalDuration / signalPeriod
dataSeries = dataSeries * ioblk.modellc
#plt.plot(phasedSeries, dataSeries, '.')
#plt.show()
#plt.plot(timeSeries, dataSeries, '.')
#plt.show()
# Test fitting
ioblk = trapezoid_fit(timeSeries, dataSeries, errorSeries, \
signalPeriod, signalEpoch+0.001, signalDurationHours*0.9, \
signalDepth*1.1, \
fitTrialN=2, fitRegion=4.0, errorScale=1.0, debugLevel=3,
sampleN=15, showFitInterval=30)
    print(ioblk)
# test generating model
newioblk = trapezoid_model_onemodel(timeSeries, signalPeriod, \
signalEpoch, signalDepth, signalDurationHours, \
signalDurationHours*0.1, ioblk.parm.samplen)
plt.close('all')
plt.plot(phasedSeries, newioblk.modellc,'.b')
newioblk = trapezoid_model_raw(newioblk, signalEpoch+0.05, signalDepth*1.5, \
signalDurationHours*2.0, signalDurationHours*2.0*0.2)
plt.plot(phasedSeries, newioblk.modellc, '.r')
plt.show()
|
barentsen/dave
|
trapezoidFit/trapfit.py
|
Python
|
mit
| 28,177
|
[
"Gaussian"
] |
b8f962b2c9c158dd9061b71130d4a1e0e30c61330bc2d9c261f4b7eae41081a9
|
#
# -*- coding: utf-8 -*-
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2007-2009 Brian G. Matherly
# Copyright (C) 2008 Raphael Ackermann
# 2002-2003 Donald A. Peterson
# 2003 Alex Roitman
# 2009 Benny Malengier
# 2010 Peter Landgren
# Copyright (C) 2011 Adam Stein <adam@csh.rit.edu>
# 2011-2012 Harald Rosemann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""LaTeX document generator"""
#------------------------------------------------------------------------
#
# Python modules
#
#------------------------------------------------------------------------
from bisect import bisect
import re
import os
import logging
try:
from PIL import Image
HAVE_PIL = True
except ImportError:
HAVE_PIL = False
#------------------------------------------------------------------------
#
# Gprime modules
#
#------------------------------------------------------------------------
from gprime.plug.docgen import (BaseDoc, TextDoc, PAPER_LANDSCAPE,
FONT_SANS_SERIF, URL_PATTERN)
from gprime.plug.docbackend import DocBackend
from gprime.const import LOCALE as glocale
_ = glocale.translation.gettext
_LOG = logging.getLogger(".latexdoc")
_CLICKABLE = '\\url{\\1}'
#------------------------------------------------------------------------
#
# Special settings for LaTeX output
#
#------------------------------------------------------------------------
# For an interim mark, e.g. for an intended linebreak, a special pattern is
# used. It shouldn't interfere with normal text. In LaTeX the character '&'
# is used for column separation in tables and may occur there in series. The
# pattern is used here before column separation is set. On the other hand,
# incoming text can't contain this pattern, because any '&&' would already
# have been escaped to '\&\&'. So the chosen pattern will do the job without
# confusion:
SEPARATION_PAT = '&&'
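# Illustration (an assumption, not from the original source): incoming text
# has already had '&' escaped to '\&' by latexescape() further below, so a
# bare '&&' can never occur in cell content and is safe as an internal
# marker; e.g. the multi-line cell content 'born&&died' is later split into
# two stacked lines of one cell.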
#------------------------------------------------------------------------
#
# LaTeX Article Template
#
#------------------------------------------------------------------------
_LATEX_TEMPLATE_1 = '\\documentclass[%s]{article}\n'
_LATEX_TEMPLATE = '''%
%
\\usepackage[T1]{fontenc}%
%
% We use latin1 encoding at a minimum by default.
% GRAMPS uses unicode UTF-8 encoding for its
% international support. LaTeX can deal gracefully
% with unicode encoding by using the ucs style invoked
% when utf8 is specified as an option to the inputenc
% package. This package is included by default in some
% installations, but not in others, so we do not make it
% the default. Uncomment the first line if you wish to use it
% (If you do not have ucs.sty, you may obtain it from
% http://www.tug.org/tex-archive/macros/latex/contrib/supported/unicode/)
%
%\\usepackage[latin1]{inputenc}%
\\usepackage[latin1,utf8]{inputenc}%
\\usepackage{graphicx}% Extended graphics support
\\usepackage{longtable}% For multi-page tables
\\usepackage{calc}% For some calculations
\\usepackage{ifthen}% For table width calculations
\\usepackage{ragged2e}% For left aligning with hyphenation
\\usepackage{wrapfig}% wrap pictures in text
%
% Depending on your LaTeX installation, the margins may be too
% narrow. This can be corrected by uncommenting the following
% two lines and adjusting the width appropriately. The example
% removes 0.5in from each margin. (Adds 1 inch to the text)
%\\addtolength{\\oddsidemargin}{-0.5in}%
%\\addtolength{\\textwidth}{1.0in}%
%
% Vertical spacing between paragraphs:
% take one of three possibilities or modify to your taste:
%\\setlength{\\parskip}{1.0ex plus0.2ex minus0.2ex}%
\\setlength{\\parskip}{1.5ex plus0.3ex minus0.3ex}%
%\\setlength{\\parskip}{2.0ex plus0.4ex minus0.4ex}%
%
% Vertical spacing between lines:
% take one of three possibilities or modify to your taste:
\\renewcommand{\\baselinestretch}{1.0}%
%\\renewcommand{\\baselinestretch}{1.1}%
%\\renewcommand{\\baselinestretch}{1.2}%
%
% Indentation; substitute for '1cm' of gramps, 2.5em is right for 12pt
% take one of three possibilities or modify to your taste:
\\newlength{\\grbaseindent}%
%\\setlength{\\grbaseindent}{3.0em}%
\\setlength{\\grbaseindent}{2.5em}%
%\\setlength{\\grbaseindent}{2.0em}%
%
%
% -------------------------------------------------------------
% New lengths, counters and commands for calculations in tables
% -------------------------------------------------------------
%
\\newlength{\\grtabwidth}%
\\newlength{\\grtabprepos}%
\\newlength{\\grreqwidth}%
\\newlength{\\grtempwd}%
\\newlength{\\grmaxwidth}%
\\newlength{\\grprorated}%
\\newlength{\\grxwd}%
\\newlength{\\grwidthused}%
\\newlength{\\grreduce}%
\\newlength{\\grcurcolend}%
\\newlength{\\grspanwidth}%
\\newlength{\\grleadlabelwidth}%
\\newlength{\\grminpgindent}%
\\newlength{\\grlistbacksp}%
\\newlength{\\grpictsize}%
\\newlength{\\grmaxpictsize}%
\\newlength{\\grtextsize}%
\\newlength{\\grmaxtextsize}%
\\newcounter{grtofixcnt}%
\\newcounter{grxwdcolcnt}%
%
%
\\newcommand{\\grinitlength}[2]{%
\\ifthenelse{\\isundefined{#1}}%
{\\newlength{#1}}{}%
\\setlength{#1}{#2}%
}%
%
\\newcommand{\\grinittab}[2]{% #1: tabwidth, #2 = 1.0/anz-cols
\\setlength{\\grtabwidth}{#1}%
\\setlength{\\grprorated}{#2\\grtabwidth}%
\\setlength{\\grwidthused}{0em}%
\\setlength{\\grreqwidth}{0em}%
\\setlength{\\grmaxwidth }{0em}%
\\setlength{\\grxwd}{0em}%
\\setlength{\\grtempwd}{0em}%
\\setlength{\\grpictsize}{0em}%
\\setlength{\\grmaxpictsize}{0em}%
\\setlength{\\grtextsize}{0em}%
\\setlength{\\grmaxtextsize}{0em}%
\\setlength{\\grcurcolend}{0em}%
\\setcounter{grxwdcolcnt}{0}%
\\setcounter{grtofixcnt}{0}% number of wide cols%
\\grinitlength{\\grcolbega}{0em}% beg of first col
}%
%
\\newcommand{\\grmaxvaltofirst}[2]{%
\\ifthenelse{\\lengthtest{#1 < #2}}%
{\\setlength{#1}{#2}}{}%
}%
%
\\newcommand{\\grsetreqfull}{%
\\grmaxvaltofirst{\\grmaxpictsize}{\\grpictsize}%
\\grmaxvaltofirst{\\grmaxtextsize}{\\grtextsize}%
}%
%
\\newcommand{\\grsetreqpart}[1]{%
\\addtolength{\\grtextsize}{#1 - \\grcurcolend}%
\\addtolength{\\grpictsize}{#1 - \\grcurcolend}%
\\grsetreqfull%
}%
%
\\newcommand{\\grdividelength}{%
\\setlength{\\grtempwd}{\\grtabwidth - \\grwidthused}%
% rough division of lengths:
% if 0 < #1 <= 10: \\grxwd = ~\\grtempwd / grtofixcnt
% otherwise: \\grxwd = \\grprorated
\\ifthenelse{\\value{grtofixcnt} > 0}%
{\\ifthenelse{\\value{grtofixcnt}=1}%
{\\setlength{\\grxwd}{\\grtempwd}}{%
\\ifthenelse{\\value{grtofixcnt}=2}
{\\setlength{\\grxwd}{0.5\\grtempwd}}{%
\\ifthenelse{\\value{grtofixcnt}=3}
{\\setlength{\\grxwd}{0.333\\grtempwd}}{%
\\ifthenelse{\\value{grtofixcnt}=4}
{\\setlength{\\grxwd}{0.25\\grtempwd}}{%
\\ifthenelse{\\value{grtofixcnt}=5}
{\\setlength{\\grxwd}{0.2\\grtempwd}}{%
\\ifthenelse{\\value{grtofixcnt}=6}
{\\setlength{\\grxwd}{0.166\\grtempwd}}{%
\\ifthenelse{\\value{grtofixcnt}=7}
{\\setlength{\\grxwd}{0.143\\grtempwd}}{%
\\ifthenelse{\\value{grtofixcnt}=8}
{\\setlength{\\grxwd}{0.125\\grtempwd}}{%
\\ifthenelse{\\value{grtofixcnt}=9}
{\\setlength{\\grxwd}{0.111\\grtempwd}}{%
\\ifthenelse{\\value{grtofixcnt}=10}
{\\setlength{\\grxwd}{0.1\\grtempwd}}{%
\\setlength{\\grxwd}{\\grprorated}% give up, take \\grprorated%
}}}}}}}}}}%
\\setlength{\\grreduce}{0em}%
}{\\setlength{\\grxwd}{0em}}%
}%
%
\\newcommand{\\grtextneedwidth}[1]{%
\\settowidth{\\grtempwd}{#1}%
\\grmaxvaltofirst{\\grtextsize}{\\grtempwd}%
}%
%
\\newcommand{\\grcolsfirstfix}[5]{%
\\grinitlength{#1}{\\grcurcolend}%
\\grinitlength{#3}{0em}%
\\grinitlength{#4}{\\grmaxpictsize}%
\\grinitlength{#5}{\\grmaxtextsize}%
\\grinitlength{#2}{#5}%
\\grmaxvaltofirst{#2}{#4}%
\\addtolength{#2}{2\\tabcolsep}%
\\grmaxvaltofirst{\\grmaxwidth}{#2}%
\\ifthenelse{\\lengthtest{#2 < #4} \\or \\lengthtest{#2 < \\grprorated}}%
{ \\setlength{#3}{#2}%
\\addtolength{\\grwidthused}{#2} }%
{ \\stepcounter{grtofixcnt} }%
\\addtolength{\\grcurcolend}{#2}%
}%
%
\\newcommand{\\grcolssecondfix}[4]{%
\\ifthenelse{\\lengthtest{\\grcurcolend < \\grtabwidth}}%
{ \\setlength{#3}{#2} }%
{ \\addtolength{#1}{-\\grreduce}%
\\ifthenelse{\\lengthtest{#2 = \\grmaxwidth}}%
{ \\stepcounter{grxwdcolcnt}}%
{ \\ifthenelse{\\lengthtest{#3 = 0em} \\and %
\\lengthtest{#4 > 0em}}%
{ \\setlength{\\grtempwd}{#4}%
\\grmaxvaltofirst{\\grtempwd}{\\grxwd}%
\\addtolength{\\grreduce}{#2 - \\grtempwd}%
\\setlength{#2}{\\grtempwd}%
\\addtolength{\\grwidthused}{#2}%
\\addtocounter{grtofixcnt}{-1}%
\\setlength{#3}{#2}%
}{}%
}%
}%
}%
%
\\newcommand{\\grcolsthirdfix}[3]{%
\\ifthenelse{\\lengthtest{\\grcurcolend < \\grtabwidth}}%
{}{ \\addtolength{#1}{-\\grreduce}%
\\ifthenelse{\\lengthtest{#3 = 0em} \\and %
\\lengthtest{#2 < \\grmaxwidth}}%
{ \\ifthenelse{\\lengthtest{#2 < 0.5\\grmaxwidth}}%
{ \\setlength{\\grtempwd}{0.5\\grxwd}%
\\grmaxvaltofirst{\\grtempwd}{0.7\\grprorated}}%
{ \\setlength{\\grtempwd}{\\grxwd}}%
\\addtolength{\\grreduce}{#2 - \\grtempwd}%
\\setlength{#2}{\\grtempwd}%
\\addtolength{\\grwidthused}{#2}%
\\addtocounter{grtofixcnt}{-1}%
\\setlength{#3}{#2}%
}{}%
}%
}%
%
\\newcommand{\\grcolsfourthfix}[3]{%
\\ifthenelse{\\lengthtest{\\grcurcolend < \\grtabwidth}}%
{}{ \\addtolength{#1}{-\\grreduce}%
\\ifthenelse{\\lengthtest{#3 = 0em}}%
{ \\addtolength{\\grreduce}{#2 - \\grxwd}%
\\setlength{#2}{\\grxwd}%
\\setlength{#3}{#2}%
}{}%
}%
}%
%
\\newcommand{\\grgetspanwidth}[4]{%
\\grinitlength{#1}{#3 - #2 + #4}%
}%
%
\\newcommand{\\tabheadstrutceil}{%
\\rule[0.0ex]{0.00em}{3.5ex}}%
\\newcommand{\\tabheadstrutfloor}{%
\\rule[-2.0ex]{0.00em}{2.5ex}}%
\\newcommand{\\tabrowstrutceil}{%
\\rule[0.0ex]{0.00em}{2.9ex}}%
\\newcommand{\\tabrowstrutfloor}{%
\\rule[-0.1ex]{0.00em}{2.0ex}}%
%
\\newcommand{\\grempty}[1]{}%
%
\\newcommand{\\graddvdots}[1]{%
\\hspace*{\\fill}\\hspace*{\\fill}\\raisebox{#1}{\\vdots}%
}%
%
\\newcommand{\\grtabpgbreak}[4]{%
#1 { \\parbox[t]{ #2 - 2\\tabcolsep}{\\tabheadstrutceil\\hspace*{\\fill}%
\\raisebox{#4}{\\vdots} #3{#4} \\hspace*{\\fill}\\tabheadstrutfloor}}%
}%
%
\\newcommand{\\grcolpart}[3]{%
#1 { \\parbox[t]{ #2 - 2\\tabcolsep}%
{\\tabrowstrutceil #3~\\\\[-1.6ex]\\tabrowstrutfloor}}%
}%
%
\\newcommand{\\grminpghead}[2]{%
\\setlength{\\grminpgindent}{#1\\grbaseindent-\\grlistbacksp}%
\\hspace*{\\grminpgindent}%
\\ifthenelse{\\not \\lengthtest{#2em > 0em}}%
{\\begin{minipage}[t]{\\textwidth -\\grminpgindent}}%
{\\begin{minipage}[t]{\\textwidth -\\grminpgindent%
-#2\\grbaseindent -4\\tabcolsep}}%
}%
%
\\newcommand{\\grminpgtail}{%
\\end{minipage}\\parindent0em%
}%
%
\\newcommand{\\grlisthead}[1]{%
\\begin{list}{#1}%
{ \\setlength{\\labelsep}{0.5em}%
\\setlength{\\labelwidth}{\\grleadlabelwidth}%
\\setlength{\\leftmargin}{\\grlistbacksp}%
}\\item%
}%
%
\\newcommand{\\grlisttail}{%
\\end{list}%
}%
%
\\newcommand{\\grprepleader}[1]{%
\\settowidth{\\grtempwd}{#1}%
\\ifthenelse{\\lengthtest{\\grtempwd > \\grleadlabelwidth}}%
{ \\setlength{\\grleadlabelwidth}{\\grtempwd}}{}%
\\setlength{\\grlistbacksp}{\\grleadlabelwidth + 1.0em}%
}%
%
\\newcommand{\\grprepnoleader}{%
\\setlength{\\grleadlabelwidth}{0em}%
\\setlength{\\grlistbacksp}{0em}%
}%
%
\\newcommand{\\grmkpicture}[4]{%
\\begin{wrapfigure}{r}{#2\\grbaseindent}%
\\vspace{-6ex}%
\\begin{center}%
\\includegraphics[%
width= #2\\grbaseindent,%
height= #3\\grbaseindent,%
keepaspectratio]%
{#1}\\\\%
{\\RaggedRight\\footnotesize#4}%
\\end{center}%
\\end{wrapfigure}%
\\settowidth{\\grtempwd}{\\footnotesize#4}%
\\setlength{\\grxwd}{#2\\grbaseindent}%
\\ifthenelse{\\lengthtest{\\grtempwd < 0.7\\grxwd}}%
{\\setlength{\\grxwd}{1ex}}{%
\\ifthenelse{\\lengthtest{\\grtempwd < 1.2\\grxwd}}%
{\\setlength{\\grxwd}{2ex}}{%
\\ifthenelse{\\lengthtest{\\grtempwd < 1.8\\grxwd}}%
{\\setlength{\\grxwd}{6ex}}{%
\\ifthenelse{\\lengthtest{\\grtempwd < 2.0\\grxwd}}%
{\\setlength{\\grxwd}{10ex}}{%
\\setlength{\\grxwd}{12ex}}%
}}}%
\\setlength{\\grtempwd}{#3\\grbaseindent + \\grxwd}%
\\rule[-\\grtempwd]{0pt}{\\grtempwd}%
\\setlength{\\grtabprepos}{-\\grtempwd}%
}%
%
%
\\begin{document}%
'''
#------------------------------------------------------------------------
#
# Font size table and function
#
#------------------------------------------------------------------------
# These tables correlate font sizes to LaTeX. The first table contains
# typical font sizes in points. The second table contains the standard
# LaTeX font size names. Since we use bisect to map the first table to the
# second, we are guaranteed that any font less than 6 points is 'tiny', fonts
# from 6-7 points are 'script', etc. and fonts greater than or equal to 22
# are considered 'Huge'. Note that fonts from 12-13 points are not given a
# LaTeX font size name but are considered "normal."
_FONT_SIZES = [6, 8, 10, 12, 14, 16, 18, 20, 22]
_FONT_NAMES = ['tiny', 'scriptsize', 'footnotesize', 'small', '',
'large', 'Large', 'LARGE', 'huge', 'Huge']
def map_font_size(fontsize):
""" Map font size in points to LaTeX font size """
return _FONT_NAMES[bisect(_FONT_SIZES, fontsize)]
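# Hedged usage sketch (not from the original module): given the tables above,
# the bisect lookup resolves point sizes roughly like this.
#
#     >>> map_font_size(5)    # below 6pt
#     'tiny'
#     >>> map_font_size(12)   # 12-13pt: no LaTeX size name, i.e. "normal"
#     ''
#     >>> map_font_size(22)   # 22pt and above
#     'Huge'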
#------------------------------------------------------------------------
#
# auxiliaries to facilitate table construction
#
#------------------------------------------------------------------------
# patterns for regular expressions, module re:
TBLFMT_PAT = re.compile(r'({\|?)l(\|?})')
# constants for routing in table construction:
(CELL_BEG, CELL_TEXT, CELL_END, ROW_BEG, ROW_END, TAB_BEG,
TAB_END) = list(range(7))
FIRST_ROW, SUBSEQ_ROW = list(range(2))
def get_charform(col_num):
"""
    Convert a column number to a column character,
    limited to the letters a-z;
    26 columns are enough, there is no need for more.
    Serves as an early test of the column count in start_table().
"""
if col_num > ord('z') - ord('a'):
raise ValueError(''.join((
'\n number of table columns is ', repr(col_num),
'\n should be <= ', repr(ord('z') - ord('a')))))
return chr(ord('a') + col_num)
def get_numform(col_char):
return ord(col_char) - ord('a')
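# Hedged examples (not from the original module) for the two helpers above:
#
#     >>> get_charform(0), get_charform(25)
#     ('a', 'z')
#     >>> get_numform('c')
#     2
#     >>> get_charform(26)    # more than 26 columns is rejected
#     Traceback (most recent call last):
#     ...
#     ValueError: ...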
#------------------------------------------
# row_alph_counter = str_incr(MULTCOL_COUNT_BASE)
#
# 'aaa' is sufficient for up to 17576 multicolumns in each table;
# do you need more?
# uncomment one of the two lines
MULTCOL_COUNT_BASE = 'aaa'
# MULTCOL_COUNT_BASE = 'aaaa'
#------------------------------------------
def str_incr(str_counter):
""" for counting table rows """
lili = list(str_counter)
while 1:
yield ''.join(lili)
if ''.join(lili) == len(lili)*'z':
raise ValueError(''.join((
'\n can\'t increment string ', ''.join(lili),
' of length ', str(len(lili)))))
for i in range(len(lili)-1, -1, -1):
if lili[i] < 'z':
lili[i] = chr(ord(lili[i])+1)
break
else:
lili[i] = 'a'
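# Hedged usage sketch (not part of the original module): str_incr() is a
# counter-style generator yielding 'aaa', 'aab', ..., 'zzz' (26**3 = 17576
# values for the default base) and raising ValueError once exhausted.
#
#     counter = str_incr(MULTCOL_COUNT_BASE)
#     next(counter)   # -> 'aaa'
#     next(counter)   # -> 'aab'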
#------------------------------------------------------------------------
#
# Structure of Table-Memory
#
#------------------------------------------------------------------------
class TabCell:
def __init__(self, colchar, span, head, content):
self.colchar = colchar
self.span = span
self.head = head
self.content = content
class TabRow:
def __init__(self):
self.cells = []
self.tail = ''
self.addit = '' # for: \\hline, \\cline{}
class TabMem:
def __init__(self, head):
self.head = head
self.tail = ''
self.rows = []
#------------------------------------------------------------------------
#
# Functions for docbackend
#
#------------------------------------------------------------------------
def latexescape(text):
"""
Escape the following special characters: & $ % # _ { }
"""
text = text.replace('&', '\\&')
text = text.replace('$', '\\$')
text = text.replace('%', '\\%')
text = text.replace('#', '\\#')
text = text.replace('_', '\\_')
text = text.replace('{', '\\{')
text = text.replace('}', '\\}')
# replace character unknown to LaTeX
text = text.replace('→', '$\\longrightarrow$')
return text
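# Hedged example (not part of the original module) of the escaping above:
#
#     >>> latexescape('Smith & Sons, 100% of #1_{items}')
#     'Smith \\& Sons, 100\\% of \\#1\\_\\{items\\}'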
def latexescapeverbatim(text):
"""
Escape special characters and also make sure that LaTeX respects whitespace
and newlines correctly.
"""
text = latexescape(text)
text = text.replace(' ', '\\ ')
text = text.replace('\n', '~\\newline \n')
    # spaces at the beginning of a line are normally ignored; make sure they are not.
    # due to the above, a space at the beginning is now '\newline\n\ '
text = text.replace('\\newline\n\\ ',
'\\newline\n\\hspace*{0.1\\grbaseindent}\\ ')
return text
#------------------------------------------------------------------------
#
# Document Backend class for cairo docs
#
#------------------------------------------------------------------------
class LaTeXBackend(DocBackend):
"""
Implementation of docbackend for latex docs.
File and File format management for latex docs
"""
# overwrite base class attributes, they become static var of LaTeXDoc
SUPPORTED_MARKUP = [
DocBackend.BOLD,
DocBackend.ITALIC,
DocBackend.UNDERLINE,
DocBackend.FONTSIZE,
DocBackend.FONTFACE,
DocBackend.SUPERSCRIPT]
STYLETAG_MARKUP = {
DocBackend.BOLD : ("\\textbf{", "}"),
DocBackend.ITALIC : ("\\textit{", "}"),
DocBackend.UNDERLINE : ("\\underline{", "}"),
DocBackend.SUPERSCRIPT : ("\\textsuperscript{", "}"),
}
ESCAPE_FUNC = lambda x: latexescape
def setescape(self, preformatted=False):
"""
LaTeX needs two different escape functions depending on the type.
This function allows to switch the escape function
"""
if not preformatted:
LaTeXBackend.ESCAPE_FUNC = lambda x: latexescape
else:
LaTeXBackend.ESCAPE_FUNC = lambda x: latexescapeverbatim
def _create_xmltag(self, type, value):
"""
overwrites the method in DocBackend.
creates the latex tags needed for non bool style types we support:
            FONTSIZE : use a different \large denomination based on size
            FONTFACE : very basic; for a mono font face we use {\ttfamily }
"""
if type not in self.SUPPORTED_MARKUP:
return None
elif type == DocBackend.FONTSIZE:
#translate size in point to something LaTeX can work with
fontsize = map_font_size(value)
if fontsize:
return ("{\\" + fontsize + ' ', "}")
else:
return ("", "")
elif type == DocBackend.FONTFACE:
if 'MONO' in value.upper():
return ("{\\ttfamily ", "}")
elif 'ROMAN' in value.upper():
return ("{\\rmfamily ", "}")
return None
def _checkfilename(self):
"""
Check to make sure filename satisfies the standards for this filetype
"""
if not self._filename.endswith(".tex"):
self._filename = self._filename + ".tex"
#------------------------------------------------------------------------
#
# Paragraph Handling
#
#------------------------------------------------------------------------
class TexFont:
def __init__(self, style=None):
if style:
self.font_beg = style.font_beg
self.font_end = style.font_end
self.left_indent = style.left_indent
self.first_line_indent = style.first_line_indent
else:
self.font_beg = ""
self.font_end = ""
self.left_indent = ""
self.first_line_indent = ""
#------------------------------------------------------------------
#
# LaTeXDoc
#
#------------------------------------------------------------------
class LaTeXDoc(BaseDoc, TextDoc):
"""LaTeX document interface class. Derived from BaseDoc"""
# ---------------------------------------------------------------
# some additional variables
# ---------------------------------------------------------------
in_table = False
    in_multrow_cell = False # for table structure: cols of rows
pict = ''
pict_in_table = False
pict_width = 0
pict_height = 0
textmem = []
in_title = True
# ---------------------------------------------------------------
# begin of table special treatment
# ---------------------------------------------------------------
def emit(self, text, tab_state=CELL_TEXT, span=1):
"""
        Hand over all text but tables to self._backend.write() (lines 1-2).
        In case of tables pass to the special treatment below.
"""
if not self.in_table: # all stuff but table
self._backend.write(text)
else:
self.handle_table(text, tab_state, span)
def handle_table(self, text, tab_state, span):
"""
Collect tables elements in an adequate cell/row/table structure and
call for LaTeX width calculations and writing out
"""
if tab_state == CELL_BEG:
# here text is head
self.textmem = []
self.curcol_char = get_charform(self.curcol-1)
if span > 1: # phantom columns prior to multicolumns
for col in range(self.curcol - span, self.curcol - 1):
col_char = get_charform(col)
phantom = TabCell(col_char, 0, '', '')
self.tabrow.cells.append(phantom)
self.tabcell = TabCell(self.curcol_char, span, text, '')
elif tab_state == CELL_TEXT:
self.textmem.append(text)
elif tab_state == CELL_END: # text == ''
self.tabcell.content = ''.join(self.textmem).strip()
if self.tabcell.content.find('\\centering') != -1:
self.tabcell.content = self.tabcell.content.replace(
'\\centering', '')
self.tabcell.head = re.sub(
TBLFMT_PAT, '\\1c\\2', self.tabcell.head)
self.tabrow.cells.append(self.tabcell)
self.textmem = []
elif tab_state == ROW_BEG:
self.tabrow = TabRow()
elif tab_state == ROW_END:
self.tabrow.addit = text # text: \\hline, \\cline{}
self.tabrow.tail = ''.join(self.textmem) # \\\\ row-termination
if self.in_multrow_cell: # cols of rows: convert to rows of cols
self.repack_row()
else:
self.tabmem.rows.append(self.tabrow)
elif tab_state == TAB_BEG: # text: \\begin{longtable}[l]{
self._backend.write(''.join(('\\grinittab{\\textwidth}{',
repr(1.0/self.numcols), '}%\n')))
self.tabmem = TabMem(text)
elif tab_state == TAB_END: # text: \\end{longtable}
self.tabmem.tail = text
# table completed, calc widths and write out
self.calc_latex_widths()
self.write_table()
def repack_row(self):
"""
Transpose contents contained in a row of cols of cells
to rows of cells with corresponding contents.
Cols of the mult-row-cell are ended by SEPARATION_PAT
"""
# if last col empty: delete
if self.tabrow.cells[-1].content == '':
del self.tabrow.cells[-1]
self.numcols -= 1
# extract cell.contents
bare_contents = [cell.content.strip(SEPARATION_PAT).replace(
'\n', '').split(SEPARATION_PAT) for cell in self.tabrow.cells]
# mk equal length & transpose
num_new_rows = max([len(mult_row_cont)
for mult_row_cont in bare_contents])
cols_equ_len = []
for mrc in bare_contents:
for i in range(num_new_rows - len(mrc)):
mrc.append('')
cols_equ_len.append(mrc)
transp_cont = list(zip(*cols_equ_len))
# picts? extract
first_cell, last_cell = (0, self.numcols)
if self.pict_in_table:
if transp_cont[0][-1].startswith('\\grmkpicture'):
self.pict = transp_cont[0][-1]
last_cell -= 1
self.numcols -= 1
self._backend.write(''.join(
('\\addtolength{\\grtabwidth}{-',
repr(self.pict_width),
'\\grbaseindent -2\\tabcolsep}%\n')))
self.pict_in_table = False
# new row-col structure
for row in range(num_new_rows):
new_row = TabRow()
for i in range(first_cell, last_cell):
new_cell = TabCell(
get_charform(i + first_cell),
self.tabrow.cells[i].span, self.tabrow.cells[i].head,
transp_cont[row][i + first_cell])
new_row.cells.append(new_cell)
new_row.tail = self.tabrow.tail
new_row.addit = ''
self.tabmem.rows.append(new_row)
self.tabmem.rows[-1].addit = self.tabrow.addit
self.in_multrow_cell = False
return
def calc_latex_widths(self):
"""
Control width settings in latex table construction
Evaluations are set up here and passed to LaTeX
to calculate required and to fix suitable widths.
??? Can all this be done exclusively in TeX? Don't know how.
"""
tabcol_chars = []
for col_num in range(self.numcols):
col_char = get_charform(col_num)
tabcol_chars.append(col_char)
for row in self.tabmem.rows:
cell = row.cells[col_num]
if cell.span == 0:
continue
if cell.content.startswith('\\grmkpicture'):
self._backend.write(
''.join(('\\setlength{\\grpictsize}{',
self.pict_width, '\\grbaseindent}%\n')))
else:
for part in cell.content.split(SEPARATION_PAT):
self._backend.write(
''.join(('\\grtextneedwidth{', part, '}%\n')))
row.cells[col_num].content = cell.content.replace(
SEPARATION_PAT, '~\\newline \n')
if cell.span == 1:
self._backend.write(''.join(('\\grsetreqfull%\n')))
elif cell.span > 1:
self._backend.write(
''.join(('\\grsetreqpart{\\grcolbeg',
get_charform(get_numform(cell.colchar) -
cell.span +1),
'}%\n')))
self._backend.write(
''.join(('\\grcolsfirstfix',
' {\\grcolbeg', col_char, '}{\\grtempwidth', col_char,
'}{\\grfinalwidth', col_char, '}{\\grpictreq',
col_char, '}{\\grtextreq', col_char, '}%\n')))
self._backend.write(''.join(('\\grdividelength%\n')))
for col_char in tabcol_chars:
self._backend.write(
''.join(('\\grcolssecondfix',
' {\\grcolbeg', col_char, '}{\\grtempwidth', col_char,
'}{\\grfinalwidth', col_char, '}{\\grpictreq',
col_char, '}%\n')))
self._backend.write(''.join(('\\grdividelength%\n')))
for col_char in tabcol_chars:
self._backend.write(
''.join(('\\grcolsthirdfix',
' {\\grcolbeg', col_char, '}{\\grtempwidth', col_char,
'}{\\grfinalwidth', col_char, '}%\n')))
self._backend.write(''.join(('\\grdividelength%\n')))
for col_char in tabcol_chars:
self._backend.write(
''.join(('\\grcolsfourthfix',
' {\\grcolbeg', col_char, '}{\\grtempwidth', col_char,
'}{\\grfinalwidth', col_char, '}%\n')))
self.multcol_alph_counter = str_incr(MULTCOL_COUNT_BASE)
for row in self.tabmem.rows:
for cell in row.cells:
if cell.span > 1:
multcol_alph_id = next(self.multcol_alph_counter)
self._backend.write(
''.join(('\\grgetspanwidth{',
'\\grspanwidth', multcol_alph_id,
'}{\\grcolbeg', get_charform(
get_numform(cell.colchar)- cell.span + 1),
'}{\\grcolbeg', cell.colchar,
'}{\\grtempwidth', cell.colchar,
'}%\n')))
def write_table(self):
        # Choosing RaggedRight (with hyphenation) in the table and
        # providing for manual adjustment of the column widths
self._backend.write(
''.join((
'%\n', self.pict,
'%\n%\n',
'% ==> Comment out one of the two lines ',
'by a leading "%" (first position)\n',
'{ \\RaggedRight% left align with hyphenation in table \n',
'%{% no left align in table \n%\n',
'% ==> You may add pos or neg values ',
'to the following ', repr(self.numcols), ' column widths %\n')))
for col_num in range(self.numcols):
self._backend.write(
''.join(('\\addtolength{\\grtempwidth',
get_charform(col_num), '}{+0.0cm}%\n')))
self._backend.write('% === %\n')
# adjust & open table':
if self.pict:
self._backend.write(
''.join(('%\n\\vspace{\\grtabprepos}%\n',
'\\setlength{\\grtabprepos}{0ex}%\n')))
self.pict = ''
self._backend.write(''.join(self.tabmem.head))
# special treatment at begin of longtable for heading and
# closing at top and bottom of table
# and parts of it at pagebreak separating
self.multcol_alph_counter = str_incr(MULTCOL_COUNT_BASE)
splitting_row = self.mk_splitting_row(self.tabmem.rows[FIRST_ROW])
self.multcol_alph_counter = str_incr(MULTCOL_COUNT_BASE)
complete_row = self.mk_complete_row(self.tabmem.rows[FIRST_ROW])
self._backend.write(splitting_row)
self._backend.write('\\endhead%\n')
self._backend.write(splitting_row.replace('{+2ex}', '{-2ex}'))
self._backend.write('\\endfoot%\n')
if self.head_line:
self._backend.write('\\hline%\n')
self.head_line = False
else:
self._backend.write('%\n')
self._backend.write(complete_row)
self._backend.write('\\endfirsthead%\n')
self._backend.write('\\endlastfoot%\n')
# hand over subsequent rows
for row in self.tabmem.rows[SUBSEQ_ROW:]:
self._backend.write(self.mk_complete_row(row))
# close table by '\\end{longtable}', end '{\\RaggedRight' or '{' by '}'
self._backend.write(''.join((''.join(self.tabmem.tail), '}%\n\n')))
def mk_splitting_row(self, row):
splitting = []
add_vdots = '\\grempty'
for cell in row.cells:
if cell.span == 0:
continue
if (not splitting and
get_numform(cell.colchar) == self.numcols - 1):
add_vdots = '\\graddvdots'
if cell.span == 1:
cell_width = ''.join(('\\grtempwidth', cell.colchar))
else:
cell_width = ''.join(('\\grspanwidth',
next(self.multcol_alph_counter)))
splitting.append(
''.join(('\\grtabpgbreak{', cell.head, '}{',
cell_width, '}{', add_vdots, '}{+2ex}%\n')))
return ''.join((' & '.join(splitting), '%\n', row.tail))
def mk_complete_row(self, row):
complete = []
for cell in row.cells:
if cell.span == 0:
continue
elif cell.span == 1:
cell_width = ''.join(('\\grtempwidth', cell.colchar))
else:
cell_width = ''.join(('\\grspanwidth',
next(self.multcol_alph_counter)))
complete.append(
''.join(('\\grcolpart{%\n ', cell.head, '}{%\n', cell_width,
'}{%\n ', cell.content, '%\n}%\n')))
return ''.join((' & '.join(complete), '%\n', row.tail, row.addit))
# ---------------------------------------------------------------------
# end of special table treatment
# ---------------------------------------------------------------------
def page_break(self):
"Forces a page break, creating a new page"
self.emit('\\newpage%\n')
def open(self, filename):
"""Opens the specified file, making sure that it has the
extension of .tex"""
self._backend = LaTeXBackend(filename)
self._backend.open()
# Font size control seems to be limited. For now, ignore
        # any style constraints, and use 12pt as the default
options = "12pt"
if self.paper.get_orientation() == PAPER_LANDSCAPE:
options = options + ",landscape"
# Paper selections are somewhat limited on a stock installation.
# If the user picks something not listed here, we'll just accept
# the default of the user's LaTeX installation (usually letter).
paper_name = self.paper.get_size().get_name().lower()
if paper_name in ["a4", "a5", "legal", "letter"]:
options += ',' + paper_name + 'paper'
# Use the article template, T1 font encodings, and specify
# that we should use Latin1 and unicode character encodings.
self.emit(_LATEX_TEMPLATE_1 % options)
self.emit(_LATEX_TEMPLATE)
self.in_list = False
self.in_table = False
self.head_line = False
#Establish some local styles for the report
self.latexstyle = {}
self.latex_font = {}
style_sheet = self.get_style_sheet()
for style_name in style_sheet.get_paragraph_style_names():
style = style_sheet.get_paragraph_style(style_name)
font = style.get_font()
size = font.get_size()
self.latex_font[style_name] = TexFont()
thisstyle = self.latex_font[style_name]
thisstyle.font_beg = ""
thisstyle.font_end = ""
# Is there special alignment? (default is left)
align = style.get_alignment_text()
if align == "center":
thisstyle.font_beg += "{\\centering"
thisstyle.font_end = ''.join(("\n\n}", thisstyle.font_end))
elif align == "right":
thisstyle.font_beg += "\\hfill"
# Establish font face and shape
if font.get_type_face() == FONT_SANS_SERIF:
thisstyle.font_beg += "\\sffamily"
thisstyle.font_end = "\\rmfamily" + thisstyle.font_end
if font.get_bold():
thisstyle.font_beg += "\\bfseries"
thisstyle.font_end = "\\mdseries" + thisstyle.font_end
if font.get_italic() or font.get_underline():
thisstyle.font_beg += "\\itshape"
thisstyle.font_end = "\\upshape" + thisstyle.font_end
# Now determine font size
fontsize = map_font_size(size)
if fontsize:
thisstyle.font_beg += "\\" + fontsize
thisstyle.font_end += "\\normalsize"
thisstyle.font_beg += " "
thisstyle.font_end += " "
left = style.get_left_margin()
first = style.get_first_indent() + left
thisstyle.left_indent = left
thisstyle.first_line_indent = first
self.latexstyle[style_name] = thisstyle
def close(self):
"""Clean up and close the document"""
if self.in_list:
self.emit('\\end{list}\n')
self.emit('\\end{document}\n')
self._backend.close()
def end_page(self):
"""Issue a new page command"""
self.emit('\\newpage')
def start_paragraph(self, style_name, leader=None):
"""Paragraphs handling - A Gramps paragraph is any
single body of text from a single word to several sentences.
We assume a linebreak at the end of each paragraph."""
style_sheet = self.get_style_sheet()
style = style_sheet.get_paragraph_style(style_name)
ltxstyle = self.latexstyle[style_name]
self.level = style.get_header_level()
self.fbeg = ltxstyle.font_beg
self.fend = ltxstyle.font_end
self.indent = ltxstyle.left_indent
self.first_line_indent = ltxstyle.first_line_indent
if self.indent == 0:
self.indent = self.first_line_indent
# For additional vertical space beneath title line(s)
# i.e. when the first centering ended:
if self.in_title and ltxstyle.font_beg.find('centering') == -1:
self.in_title = False
self._backend.write('\\vspace{5ex}%\n')
if self.in_table: # paragraph in table indicates: cols of rows
self.in_multrow_cell = True
else:
if leader:
self._backend.write(
''.join(('\\grprepleader{', leader, '}%\n')))
else:
self._backend.write('\\grprepnoleader%\n')
# -------------------------------------------------------------------
        # Gramps presumes 'cm' as units; here '\\grbaseindent' is used
        # as the equivalent, set in '_LATEX_TEMPLATE' above to '2.5em';
        # another value might be chosen there.
# -------------------------------------------------------------------
if self.indent is not None:
self._backend.write(
''.join(('\\grminpghead{', repr(self.indent), '}{',
repr(self.pict_width), '}%\n')))
self.fix_indent = True
if leader is not None and not self.in_list:
self.in_list = True
self._backend.write(''.join(('\\grlisthead{', leader,
'}%\n')))
if leader is None:
self.emit('\n')
self.emit('%s ' % self.fbeg)
def end_paragraph(self):
"""End the current paragraph"""
newline = '%\n\n'
if self.in_list:
self.in_list = False
self.emit('\n\\grlisttail%\n')
newline = ''
elif self.in_table:
newline = SEPARATION_PAT
self.emit('%s%s' % (self.fend, newline))
if self.fix_indent:
self.emit('\\grminpgtail%\n\n')
self.fix_indent = False
if self.pict_width:
self.pict_width = 0
self.pict_height = 0
def start_bold(self):
"""Bold face"""
self.emit('\\textbf{')
def end_bold(self):
"""End bold face"""
self.emit('}')
def start_superscript(self):
self.emit('\\textsuperscript{')
def end_superscript(self):
self.emit('}')
def start_table(self, name, style_name):
"""Begin new table"""
self.in_table = True
self.currow = 0
# We need to know a priori how many columns are in this table
styles = self.get_style_sheet()
self.tblstyle = styles.get_table_style(style_name)
self.numcols = self.tblstyle.get_columns()
tblfmt = '*{%d}{l}' % self.numcols
self.emit('\\begin{longtable}[l]{%s}\n' % (tblfmt), TAB_BEG)
def end_table(self):
"""Close the table environment"""
self.emit('%\n\\end{longtable}%\n', TAB_END)
self.in_table = False
def start_row(self):
"""Begin a new row"""
self.emit('', ROW_BEG)
# doline/skipfirst are flags for adding hor. rules
self.doline = False
self.skipfirst = False
self.curcol = 0
self.currow = self.currow + 1
def end_row(self):
"""End the row (new line)"""
self.emit('\\\\ ')
if self.doline:
if self.skipfirst:
self.emit(''.join((('\\cline{2-%d}' %
self.numcols), '%\n')), ROW_END)
else:
self.emit('\\hline %\n', ROW_END)
else:
self.emit('%\n', ROW_END)
self.emit('%\n')
def start_cell(self, style_name, span=1):
"""Add an entry to the table.
We always place our data inside braces
for safety of formatting."""
self.colspan = span
self.curcol = self.curcol + self.colspan
styles = self.get_style_sheet()
self.cstyle = styles.get_cell_style(style_name)
# ------------------------------------------------------------------
# begin special modification for boolean values
        # values imported here are used for tests '==1' and '!=0'. To get
        # local boolean values the tests are now transferred to the import lines
# ------------------------------------------------------------------
self.lborder = self.cstyle.get_left_border() == 1
self.rborder = self.cstyle.get_right_border() == 1
self.bborder = self.cstyle.get_bottom_border() == 1
self.tborder = self.cstyle.get_top_border() != 0
# self.llist not needed any longer.
# now column widths are arranged in self.calc_latex_widths()
# serving for fitting of cell contents at any column position.
# self.llist = 1 == self.cstyle.get_longlist()
cellfmt = "l"
# Account for vertical rules
if self.lborder:
cellfmt = '|' + cellfmt
if self.rborder:
cellfmt = cellfmt + '|'
# and Horizontal rules
if self.bborder:
self.doline = True
elif self.curcol == 1:
self.skipfirst = True
if self.tborder:
self.head_line = True
# ------------------------------------------------------------------
# end special modification for boolean values
# ------------------------------------------------------------------
self.emit('\\multicolumn{%d}{%s}' % (span, cellfmt), CELL_BEG, span)
def end_cell(self):
"""Prepares for next cell"""
self.emit('', CELL_END)
def add_media(self, infile, pos, x, y, alt='', style_name=None, crop=None):
"""Add photo to report"""
outfile = os.path.splitext(infile)[0]
pictname = latexescape(os.path.split(outfile)[1])
outfile = ''.join((outfile, '.jpg'))
outfile2 = ''.join((outfile, '.jpeg'))
outfile3 = ''.join((outfile, '.png'))
if HAVE_PIL and infile not in [outfile, outfile2, outfile3]:
try:
curr_img = Image.open(infile)
curr_img.save(outfile)
width, height = curr_img.size
if height > width:
y = y*height/width
except IOError:
self.emit(''.join(('%\n *** Error: cannot convert ', infile,
'\n *** to ', outfile,
'%\n')))
elif not HAVE_PIL:
from gprime.config import config
if not config.get('interface.ignore-pil'):
from gprime.constfunc import has_display
if has_display():
from gprime.gui.dialog import MessageHideDialog
title = _("PIL (Python Imaging Library) not loaded.")
message = _("Production of jpg images from non-jpg images "
"in LaTeX documents will not be available. "
"Use your package manager to install "
"python-imaging or python-pillow or "
"python3-pillow")
MessageHideDialog(title, message, # TODO no-parent
'interface.ignore-pil')
self.emit(''.join(('%\n *** Error: cannot convert ', infile,
'\n *** to ', outfile,
'\n *** PIL not installed %\n')))
if self.in_table:
self.pict_in_table = True
self.emit(''.join(('\\grmkpicture{', outfile, '}{', repr(x), '}{',
repr(y), '}{', pictname, '}%\n')))
self.pict_width = x
self.pict_height = y
def write_text(self, text, mark=None, links=False):
"""Write the text to the file"""
if text == '\n':
text = ''
text = latexescape(text)
if links is True:
text = re.sub(URL_PATTERN, _CLICKABLE, text)
#hard coded replace of the underline used for missing names/data
        text = text.replace('\\_'*13, '\\underline{\\hspace{3\\grbaseindent}}')
self.emit(text + ' ')
def write_styled_note(self, styledtext, format, style_name,
contains_html=False, links=False):
"""
Convenience function to write a styledtext to the latex doc.
styledtext : assumed a StyledText object to write
format : = 0 : Flowed, = 1 : Preformatted
style_name : name of the style to use for default presentation
contains_html: bool, the backend should not check if html is present.
If contains_html=True, then the textdoc is free to handle that in
some way. Eg, a textdoc could remove all tags, or could make sure
a link is clickable. self ignores notes that contain html
links: bool, make URLs clickable if True
"""
if contains_html:
return
text = str(styledtext)
s_tags = styledtext.get_tags()
if format:
#preformatted, use different escape function
self._backend.setescape(True)
markuptext = self._backend.add_markup_from_styled(text, s_tags)
if links is True:
markuptext = re.sub(URL_PATTERN, _CLICKABLE, markuptext)
#there is a problem if we write out a note in a table.
# ..................
# now solved by postprocessing in self.calc_latex_widths()
# by explicitely setting suitable width for all columns.
#
if format:
self.start_paragraph(style_name)
self.emit(markuptext)
self.end_paragraph()
#preformatted finished, go back to normal escape function
self._backend.setescape(False)
else:
for line in markuptext.split('%\n%\n '):
self.start_paragraph(style_name)
for realline in line.split('\n'):
self.emit(realline)
self.emit("~\\newline \n")
self.end_paragraph()
|
sam-m888/gprime
|
gprime/plugins/docgen/latexdoc.py
|
Python
|
gpl-2.0
| 49,156
|
[
"Brian"
] |
e591469584b95e7c5824ce12248a158b1b167202bc939194cf06f0fc766cd216
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Replacement code for the Pyke rules.
For now, we are still emulating various aspects of how our original Pyke-based
code used the Pyke 'engine' to hold translation data, both Pyke-specific and
not :
1) basic details from the iris.fileformats.cf analysis of the file are
recorded before translating each output cube, using
"engine.assert_case_specific_fact(name, args)".
2) this is also used to store intermediate info passed between rules, which
used to be done with a "facts_cf.provides" statement in rule actions.
3) Iris-specific info is (still) stored in additional properties created on
the engine object :
engine.cf_var, .cube, .cube_parts, .requires, .rule_triggered, .filename
Our "rules" are just action routines.
The top-level 'run_actions' routine decides which actions to call, based on the
info recorded when processing each cube output. It does this in a simple
explicit way, which doesn't use any clever chaining, "trigger conditions" or
other rule-type logic.
Each 'action' function can replace several similar 'rules'.
E.G. 'action_provides_grid_mapping' replaces all 'fc_provides_grid_mapping_<X>'.
To aid debug, each returns a 'rule_name' string, indicating which original rule
this particular action call is emulating : In some cases, this may include a
textual note that this rule 'failed', aka "did not trigger", which would not be
recorded in the original implementation.
TODO: remove the use of intermediate "facts" to carry information between
actions. This mimics older behaviour, so is still useful while we are still
comparing behaviour with the old Pyke rules (debugging). But once that is no
longer useful, this can be considerably simplified.
"""
from functools import wraps
import warnings
from iris.config import get_logger
import iris.fileformats.cf
import iris.fileformats.pp as pp
from . import helpers as hh
# Configure the logger.
logger = get_logger(__name__, fmt="[%(funcName)s]")
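# Rough sketch (an assumption, not defined in this module) of the minimal
# "engine" interface the action functions below rely on: an object carrying
# the properties named in the module docstring (engine.cf_var, .cube,
# .cube_parts, .requires, .rule_triggered, .filename) plus two fact helpers,
#     engine.fact_list(name)      -> list of recorded fact tuples
#     engine.add_fact(name, args) -> record a new fact tuple
# where 'rule_triggered' is a set of rule-name strings.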
def _default_rulenamesfunc(func_name):
# A simple default function to deduce the rules-name from an action-name.
funcname_prefix = "action_"
rulename_prefix = "fc_" # To match existing behaviours
rule_name = func_name
if rule_name.startswith(funcname_prefix):
rule_name = rule_name[len(funcname_prefix) :]
if not rule_name.startswith(rulename_prefix):
rule_name = rulename_prefix + rule_name
return rule_name
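# Hedged examples (not part of the original module) of the name mapping above:
#
#     >>> _default_rulenamesfunc("action_provides_grid_mapping")
#     'fc_provides_grid_mapping'
#     >>> _default_rulenamesfunc("action_default")
#     'fc_default'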
def action_function(func):
# Wrap an action function with some standard behaviour.
# Notably : engages with the rules logging process.
@wraps(func)
def inner(engine, *args, **kwargs):
# Call the original rules-func
rule_name = func(engine, *args, **kwargs)
if rule_name is None:
# Work out the corresponding rule name, and log it.
# Note: an action returns a name string, which identifies it,
# but also may vary depending on whether it successfully
# triggered, and if so what it matched.
rule_name = _default_rulenamesfunc(func.__name__)
engine.rule_triggered.add(rule_name)
func._rulenames_func = _default_rulenamesfunc
return inner
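# Minimal illustrative sketch (an assumption, not from the original module):
# any object with a 'rule_triggered' set works as a stand-in engine, so the
# decorator's logging behaviour can be seen in isolation.
#
#     class _FakeEngine:
#         rule_triggered = set()
#
#     @action_function
#     def action_example(engine):
#         return None            # fall back to the derived rule name
#
#     action_example(_FakeEngine())
#     # _FakeEngine.rule_triggered now contains 'fc_example'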
@action_function
def action_default(engine):
"""Standard operations for every cube."""
hh.build_cube_metadata(engine)
# Lookup table used by 'action_provides_grid_mapping'.
# Maps each supported CF grid-mapping-name to a pair of handling ("helper")
# routines:
# (@0) a validity-checker (or None)
# (@1) a coord-system builder function.
_GRIDTYPE_CHECKER_AND_BUILDER = {
hh.CF_GRID_MAPPING_LAT_LON: (None, hh.build_coordinate_system),
hh.CF_GRID_MAPPING_ROTATED_LAT_LON: (
None,
hh.build_rotated_coordinate_system,
),
hh.CF_GRID_MAPPING_MERCATOR: (
hh.has_supported_mercator_parameters,
hh.build_mercator_coordinate_system,
),
hh.CF_GRID_MAPPING_TRANSVERSE: (
None,
hh.build_transverse_mercator_coordinate_system,
),
hh.CF_GRID_MAPPING_STEREO: (
hh.has_supported_stereographic_parameters,
hh.build_stereographic_coordinate_system,
),
hh.CF_GRID_MAPPING_LAMBERT_CONFORMAL: (
None,
hh.build_lambert_conformal_coordinate_system,
),
hh.CF_GRID_MAPPING_LAMBERT_AZIMUTHAL: (
None,
hh.build_lambert_azimuthal_equal_area_coordinate_system,
),
hh.CF_GRID_MAPPING_ALBERS: (
None,
hh.build_albers_equal_area_coordinate_system,
),
hh.CF_GRID_MAPPING_VERTICAL: (
None,
hh.build_vertical_perspective_coordinate_system,
),
hh.CF_GRID_MAPPING_GEOSTATIONARY: (
None,
hh.build_geostationary_coordinate_system,
),
}
@action_function
def action_provides_grid_mapping(engine, gridmapping_fact):
"""Convert a CFGridMappingVariable into a cube coord-system."""
(var_name,) = gridmapping_fact
rule_name = "fc_provides_grid_mapping"
cf_var = engine.cf_var.cf_group[var_name]
grid_mapping_type = getattr(cf_var, hh.CF_ATTR_GRID_MAPPING_NAME, None)
succeed = True
if grid_mapping_type is None:
succeed = False
rule_name += " --FAILED(no grid-mapping attr)"
else:
grid_mapping_type = grid_mapping_type.lower()
if succeed:
if grid_mapping_type in _GRIDTYPE_CHECKER_AND_BUILDER:
checker, builder = _GRIDTYPE_CHECKER_AND_BUILDER[grid_mapping_type]
rule_name += f"_({grid_mapping_type})"
else:
succeed = False
rule_name += f" --FAILED(unhandled type {grid_mapping_type})"
if succeed:
if checker is not None and not checker(engine, var_name):
succeed = False
rule_name += f" --(FAILED check {checker.__name__})"
if succeed:
coordinate_system = builder(engine, cf_var)
engine.cube_parts["coordinate_system"] = coordinate_system
# Check there is not an existing one.
# ATM this is guaranteed by the caller, "run_actions".
assert engine.fact_list("grid-type") == []
engine.add_fact("grid-type", (grid_mapping_type,))
return rule_name
@action_function
def action_provides_coordinate(engine, dimcoord_fact):
"""Identify the coordinate 'type' of a CFCoordinateVariable."""
(var_name,) = dimcoord_fact
# Identify the "type" of a coordinate variable
coord_type = None
# NOTE: must test for rotated cases *first*, as 'is_longitude' and
# 'is_latitude' functions also accept rotated cases.
if hh.is_rotated_latitude(engine, var_name):
coord_type = "rotated_latitude"
elif hh.is_rotated_longitude(engine, var_name):
coord_type = "rotated_longitude"
elif hh.is_latitude(engine, var_name):
coord_type = "latitude"
elif hh.is_longitude(engine, var_name):
coord_type = "longitude"
elif hh.is_time(engine, var_name):
coord_type = "time"
elif hh.is_time_period(engine, var_name):
coord_type = "time_period"
elif hh.is_projection_x_coordinate(engine, var_name):
coord_type = "projection_x"
elif hh.is_projection_y_coordinate(engine, var_name):
coord_type = "projection_y"
if coord_type is None:
# Not identified as a specific known coord_type.
# N.B. in the original rules, this does *not* trigger separate
# 'provides' and 'build' phases : there is just a single
# 'fc_default_coordinate' rule.
# Rationalise this for now by making it more like the others.
# FOR NOW: ~matching old code, but they could *all* be simplified.
# TODO: combine 2 operation into 1 for ALL of these.
coord_type = "miscellaneous"
rule_name = "fc_default_coordinate_(provide-phase)"
else:
rule_name = f"fc_provides_coordinate_({coord_type})"
engine.add_fact("provides-coordinate-(oftype)", (coord_type, var_name))
return rule_name
# Lookup table used by 'action_build_dimension_coordinate'.
# Maps each supported coordinate-type name (a rules-internal concept) to a pair
# of information values :
# (@0) A grid "type", one of latlon/rotated/projected (or None)
# If set, the cube should have a coord-system, which is set on the
# resulting coordinate. If None, the coord has no coord_system.
# (@1) an (optional) fixed standard-name for the coordinate, or None
# If None, the coordinate name is copied from the source variable
_COORDTYPE_GRIDTYPES_AND_COORDNAMES = {
"latitude": ("latlon", hh.CF_VALUE_STD_NAME_LAT),
"longitude": ("latlon", hh.CF_VALUE_STD_NAME_LON),
"rotated_latitude": (
"rotated",
hh.CF_VALUE_STD_NAME_GRID_LAT,
),
"rotated_longitude": (
"rotated",
hh.CF_VALUE_STD_NAME_GRID_LON,
),
"projection_x": ("projected", hh.CF_VALUE_STD_NAME_PROJ_X),
"projection_y": ("projected", hh.CF_VALUE_STD_NAME_PROJ_Y),
"time": (None, None),
"time_period": (None, None),
"miscellaneous": (None, None),
}
@action_function
def action_build_dimension_coordinate(engine, providescoord_fact):
"""Convert a CFCoordinateVariable into a cube dim-coord."""
coord_type, var_name = providescoord_fact
cf_var = engine.cf_var.cf_group[var_name]
rule_name = f"fc_build_coordinate_({coord_type})"
coord_grid_class, coord_name = _COORDTYPE_GRIDTYPES_AND_COORDNAMES[
coord_type
]
if coord_grid_class is None:
# Coordinates not identified with a specific grid-type class (latlon,
# rotated or projected) are always built, but can have no coord-system.
coord_system = None # no coord-system can be used
succeed = True
else:
grid_classes = ("latlon", "rotated", "projected")
assert coord_grid_class in grid_classes
# If a coord is of a type identified with a grid, we may have a
# coordinate system (i.e. a valid grid-mapping).
# N.B. this requires each grid-type identification to validate the
# coord var (e.g. "is_longitude").
# Non-conforming lon/lat/projection coords will be classed as
# dim-coords by cf.py, but 'action_provides_coordinate' will give them
# a coord-type of 'miscellaneous' : hence, they have no coord-system.
coord_system = engine.cube_parts.get("coordinate_system")
# Translate the specific grid-mapping type to a grid-class
if coord_system is None:
succeed = True
cs_gridclass = None
else:
# Get a grid-class from the grid-type
# i.e. one of latlon/rotated/projected, as for coord_grid_class.
gridtypes_factlist = engine.fact_list("grid-type")
(gridtypes_fact,) = gridtypes_factlist # only 1 fact
(cs_gridtype,) = gridtypes_fact # fact contains 1 term
if cs_gridtype == "latitude_longitude":
cs_gridclass = "latlon"
elif cs_gridtype == "rotated_latitude_longitude":
cs_gridclass = "rotated"
else:
# Other specific projections
assert cs_gridtype is not None
cs_gridclass = "projected"
assert cs_gridclass in grid_classes + (None,)
if coord_grid_class == "latlon":
if cs_gridclass == "latlon":
succeed = True
elif cs_gridclass is None:
succeed = True
rule_name += "(no-cs)"
elif cs_gridclass == "rotated":
# We disallow this case
succeed = False
rule_name += "(FAILED : latlon coord with rotated cs)"
else:
assert cs_gridclass == "projected"
# succeed, no error, but discards the coord-system
# TODO: could issue a warning in this case ?
succeed = True
coord_system = None
rule_name += "(no-cs : discarded projected cs)"
elif coord_grid_class == "rotated":
if cs_gridclass == "rotated":
succeed = True
rule_name += "(rotated)"
elif cs_gridclass is None:
succeed = True
rule_name += "(rotated no-cs)"
elif cs_gridclass == "latlon":
# We disallow this case
succeed = False
rule_name += "(FAILED rotated coord with latlon cs)"
else:
assert cs_gridclass == "projected"
succeed = True
coord_system = None
rule_name += "(rotated no-cs : discarded projected cs)"
elif coord_grid_class == "projected":
# In this case, can *only* build a coord at all if there is a
# coord-system of the correct class (i.e. 'projected').
succeed = cs_gridclass == "projected"
if not succeed:
rule_name += "(FAILED projected coord with non-projected cs)"
else:
# Just FYI : literally not possible, as we already asserted this.
assert coord_grid_class in grid_classes
if succeed:
hh.build_dimension_coordinate(
engine, cf_var, coord_name=coord_name, coord_system=coord_system
)
return rule_name
@action_function
def action_build_auxiliary_coordinate(engine, auxcoord_fact):
"""Convert a CFAuxiliaryCoordinateVariable into a cube aux-coord."""
(var_name,) = auxcoord_fact
rule_name = "fc_build_auxiliary_coordinate"
# Identify any known coord "type" : latitude/longitude/time/time_period
# If latitude/longitude, this sets the standard_name of the built AuxCoord
coord_type = "" # unidentified : can be OK
coord_name = None
if hh.is_time(engine, var_name):
coord_type = "time"
elif hh.is_time_period(engine, var_name):
coord_type = "time_period"
elif hh.is_longitude(engine, var_name):
coord_type = "longitude"
if hh.is_rotated_longitude(engine, var_name):
coord_type += "_rotated"
coord_name = hh.CF_VALUE_STD_NAME_GRID_LON
else:
coord_name = hh.CF_VALUE_STD_NAME_LON
elif hh.is_latitude(engine, var_name):
coord_type = "latitude"
if hh.is_rotated_latitude(engine, var_name):
coord_type += "_rotated"
coord_name = hh.CF_VALUE_STD_NAME_GRID_LAT
else:
coord_name = hh.CF_VALUE_STD_NAME_LAT
if coord_type:
rule_name += f"_{coord_type}"
cf_var = engine.cf_var.cf_group.auxiliary_coordinates[var_name]
hh.build_auxiliary_coordinate(engine, cf_var, coord_name=coord_name)
return rule_name
@action_function
def action_ukmo_stash(engine):
"""Convert 'ukmo stash' cf property into a cube attribute."""
rule_name = "fc_attribute_ukmo__um_stash_source"
var = engine.cf_var
attr_name = "ukmo__um_stash_source"
attr_value = getattr(var, attr_name, None)
if attr_value is None:
attr_name = "um_stash_source" # legacy form
attr_value = getattr(var, attr_name, None)
if attr_value is None:
rule_name += "(NOT-TRIGGERED)"
else:
# No helper routine : just do it
try:
stash_code = pp.STASH.from_msi(attr_value)
except (TypeError, ValueError):
engine.cube.attributes[attr_name] = attr_value
msg = (
"Unable to set attribute STASH as not a valid MSI "
f'string "mXXsXXiXXX", got "{attr_value}"'
)
logger.debug(msg)
else:
engine.cube.attributes["STASH"] = stash_code
return rule_name
@action_function
def action_ukmo_processflags(engine):
"""Convert 'ukmo process flags' cf property into a cube attribute."""
rule_name = "fc_attribute_ukmo__process_flags"
var = engine.cf_var
attr_name = "ukmo__process_flags"
attr_value = getattr(var, attr_name, None)
if attr_value is None:
rule_name += "(NOT-TRIGGERED)"
else:
# No helper routine : just do it
flags = [x.replace("_", " ") for x in attr_value.split(" ")]
engine.cube.attributes["ukmo__process_flags"] = tuple(flags)
return rule_name
@action_function
def action_build_cell_measure(engine, cellm_fact):
"""Convert a CFCellMeasureVariable into a cube cell-measure."""
(var_name,) = cellm_fact
var = engine.cf_var.cf_group.cell_measures[var_name]
hh.build_cell_measures(engine, var)
@action_function
def action_build_ancil_var(engine, ancil_fact):
"""Convert a CFAncillaryVariable into a cube ancil-var."""
(var_name,) = ancil_fact
var = engine.cf_var.cf_group.ancillary_variables[var_name]
hh.build_ancil_var(engine, var)
@action_function
def action_build_label_coordinate(engine, label_fact):
"""Convert a CFLabelVariable into a cube string-type aux-coord."""
(var_name,) = label_fact
var = engine.cf_var.cf_group.labels[var_name]
hh.build_auxiliary_coordinate(engine, var)
@action_function
def action_formula_type(engine, formula_root_fact):
"""Register a CFVariable as a formula root."""
rule_name = "fc_formula_type"
(var_name,) = formula_root_fact
cf_var = engine.cf_var.cf_group[var_name]
# cf_var.standard_name is a formula type (or we should never get here).
formula_type = getattr(cf_var, "standard_name", None)
succeed = True
if formula_type not in iris.fileformats.cf.reference_terms:
succeed = False
rule_name += f"(FAILED - unrecognised formula type = {formula_type!r})"
msg = f"Ignored formula of unrecognised type: {formula_type!r}."
warnings.warn(msg)
if succeed:
# Check we don't already have one.
existing_type = engine.requires.get("formula_type")
if existing_type:
# NOTE: in this case, for now, we will accept the last appearing,
# which matches the older behaviour.
# TODO: this needs resolving, somehow.
succeed = False
msg = (
"Omitting factories for some hybrid coordinates, as multiple "
"hybrid coordinates on a single variable are not supported: "
f"Formula of type ={formula_type!r} "
f"overrides another of type ={existing_type!r}.)"
)
warnings.warn(msg)
rule_name += f"_{formula_type}"
# Set 'requires' info for iris.fileformats.netcdf._load_aux_factory.
engine.requires["formula_type"] = formula_type
return rule_name
@action_function
def action_formula_term(engine, formula_term_fact):
"""Register a CFVariable as a formula term."""
# Must run AFTER formula root identification.
(termvar_name, rootvar_name, term_name) = formula_term_fact
# The rootname is implicit : have only one per cube
# TODO: change when we adopt cf-1.7 advanced grid-mapping syntax
engine.requires.setdefault("formula_terms", {})[term_name] = termvar_name
rule_name = f"fc_formula_term({term_name})"
return rule_name
def run_actions(engine):
"""
Run all actions for a cube.
This is the top-level "activation" function which runs all the appropriate
rules actions to translate facts and build all the cube elements.
The specific cube being translated is "engine.cube".
"""
# default (all cubes) action, always runs
action_default(engine) # This should run the default rules.
# deal with grid-mappings
grid_mapping_facts = engine.fact_list("grid_mapping")
# For now, there should be at most *one* of these.
assert len(grid_mapping_facts) in (0, 1)
for grid_mapping_fact in grid_mapping_facts:
action_provides_grid_mapping(engine, grid_mapping_fact)
# identify + record aka "PROVIDE" specific named coordinates
# N.B. cf.py has identified that these are dim-coords, NOT aux-coords
# (which are recorded separately).
# TODO: can probably remove this step ??
dimcoord_facts = engine.fact_list("coordinate")
for dimcoord_fact in dimcoord_facts:
action_provides_coordinate(engine, dimcoord_fact)
# build (dimension) coordinates
# The 'provides' step and the grid-mapping must have already been done.
providescoord_facts = engine.fact_list("provides-coordinate-(oftype)")
for providescoord_fact in providescoord_facts:
action_build_dimension_coordinate(engine, providescoord_fact)
# build aux-coords
auxcoord_facts = engine.fact_list("auxiliary_coordinate")
for auxcoord_fact in auxcoord_facts:
action_build_auxiliary_coordinate(engine, auxcoord_fact)
    # Detect + process any special 'ukmo' attributes
# Run on every cube : they choose themselves whether to trigger.
action_ukmo_stash(engine)
action_ukmo_processflags(engine)
# cell measures
cellm_facts = engine.fact_list("cell_measure")
for cellm_fact in cellm_facts:
action_build_cell_measure(engine, cellm_fact)
# ancillary variables
ancil_facts = engine.fact_list("ancillary_variable")
for ancil_fact in ancil_facts:
action_build_ancil_var(engine, ancil_fact)
# label coords
label_facts = engine.fact_list("label")
for label_fact in label_facts:
action_build_label_coordinate(engine, label_fact)
# formula root variables
formula_root_facts = engine.fact_list("formula_root")
for root_fact in formula_root_facts:
action_formula_type(engine, root_fact)
# formula terms
# The 'formula_root's must have already been done.
formula_term_facts = engine.fact_list("formula_term")
for term_fact in formula_term_facts:
action_formula_term(engine, term_fact)
|
rcomer/iris
|
lib/iris/fileformats/_nc_load_rules/actions.py
|
Python
|
lgpl-3.0
| 21,897
|
[
"NetCDF"
] |
dc21ed5811c49fb917b6e0c3a7947b920b5e1c31c92b071d2b09d13e64ef9a87
|
"""
Convenience functions for the construction of spatial weights based on
contiguity and distance criteria.
"""
__author__ = "Sergio J. Rey <srey@asu.edu> "
import pysal
from Contiguity import buildContiguity
from Distance import knnW, Kernel, DistanceBand
from util import get_ids, get_points_array_from_shapefile, min_threshold_distance
import numpy as np
__all__ = ['queen_from_shapefile', 'rook_from_shapefile', 'knnW_from_array',
'knnW_from_shapefile', 'threshold_binaryW_from_array',
'threshold_binaryW_from_shapefile', 'threshold_continuousW_from_array',
'threshold_continuousW_from_shapefile', 'kernelW', 'kernelW_from_shapefile',
'adaptive_kernelW', 'adaptive_kernelW_from_shapefile',
'min_threshold_dist_from_shapefile', 'build_lattice_shapefile']
def queen_from_shapefile(shapefile, idVariable=None, sparse=False):
"""
Queen contiguity weights from a polygon shapefile.
Parameters
----------
shapefile : string
name of polygon shapefile including suffix.
idVariable : string
name of a column in the shapefile's DBF to use for ids.
sparse : boolean
If True return WSP instance
If False return W instance
Returns
-------
w : W
instance of spatial weights
Examples
--------
>>> wq=queen_from_shapefile(pysal.examples.get_path("columbus.shp"))
>>> "%.3f"%wq.pct_nonzero
'9.829'
>>> wq=queen_from_shapefile(pysal.examples.get_path("columbus.shp"),"POLYID")
>>> "%.3f"%wq.pct_nonzero
'9.829'
>>> wq=queen_from_shapefile(pysal.examples.get_path("columbus.shp"), sparse=True)
>>> pct_sp = wq.sparse.nnz *1. / wq.n**2
>>> "%.3f"%pct_sp
'0.098'
Notes
-----
Queen contiguity defines as neighbors any pair of polygons that share at
least one vertex in their polygon definitions.
See Also
--------
:class:`pysal.weights.W`
"""
shp = pysal.open(shapefile)
w = buildContiguity(shp, criterion='queen')
if idVariable:
ids = get_ids(shapefile, idVariable)
w.remap_ids(ids)
else:
ids = None
shp.close()
w.set_shapefile(shapefile, idVariable)
if sparse:
w = pysal.weights.WSP(w.sparse, id_order=ids)
return w
def rook_from_shapefile(shapefile, idVariable=None, sparse=False):
"""
Rook contiguity weights from a polygon shapefile.
Parameters
----------
shapefile : string
name of polygon shapefile including suffix.
idVariable: string
name of a column in the shapefile's DBF to use for ids
sparse : boolean
If True return WSP instance
If False return W instance
Returns
-------
w : W
instance of spatial weights
Examples
--------
>>> wr=rook_from_shapefile(pysal.examples.get_path("columbus.shp"), "POLYID")
>>> "%.3f"%wr.pct_nonzero
'8.330'
>>> wr=rook_from_shapefile(pysal.examples.get_path("columbus.shp"), sparse=True)
>>> pct_sp = wr.sparse.nnz *1. / wr.n**2
>>> "%.3f"%pct_sp
'0.083'
Notes
-----
Rook contiguity defines as neighbors any pair of polygons that share a
common edge in their polygon definitions.
See Also
--------
:class:`pysal.weights.W`
"""
shp = pysal.open(shapefile)
w = buildContiguity(shp, criterion='rook')
if idVariable:
ids = get_ids(shapefile, idVariable)
w.remap_ids(ids)
else:
ids = None
shp.close()
w.set_shapefile(shapefile, idVariable)
if sparse:
w = pysal.weights.WSP(w.sparse, id_order=ids)
return w
def spw_from_gal(galfile):
"""
Sparse scipy matrix for w from a gal file.
Parameters
----------
galfile : string
name of gal file including suffix
Returns
-------
spw : sparse_matrix
scipy sparse matrix in CSR format
ids : array
identifiers for rows/cols of spw
Examples
--------
>>> spw = pysal.weights.user.spw_from_gal(pysal.examples.get_path("sids2.gal"))
>>> spw.sparse.nnz
462
"""
return pysal.open(galfile, 'r').read(sparse=True)
# Distance based weights
def knnW_from_array(array, k=2, p=2, ids=None, radius=None):
"""
Nearest neighbor weights from a numpy array.
Parameters
----------
    array : array
(n,m)
attribute data, n observations on m attributes
k : int
number of nearest neighbors
p : float
Minkowski p-norm distance metric parameter:
1<=p<=infinity
2: Euclidean distance
1: Manhattan distance
ids : list
identifiers to attach to each observation
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
Returns
-------
w : W
instance; Weights object with binary weights.
Examples
--------
>>> import numpy as np
>>> x,y=np.indices((5,5))
>>> x.shape=(25,1)
>>> y.shape=(25,1)
>>> data=np.hstack([x,y])
>>> wnn2=knnW_from_array(data,k=2)
>>> wnn4=knnW_from_array(data,k=4)
>>> set([1, 5, 6, 2]) == set(wnn4.neighbors[0])
True
>>> set([0, 1, 10, 6]) == set(wnn4.neighbors[5])
True
>>> set([1, 5]) == set(wnn2.neighbors[0])
True
>>> set([0,6]) == set(wnn2.neighbors[5])
True
>>> "%.2f"%wnn2.pct_nonzero
'8.00'
>>> wnn4.pct_nonzero
16.0
>>> wnn4=knnW_from_array(data,k=4)
>>> set([ 1,5,6,2]) == set(wnn4.neighbors[0])
True
Notes
-----
Ties between neighbors of equal distance are arbitrarily broken.
See Also
--------
:class:`pysal.weights.W`
"""
if radius is not None:
kdtree = pysal.cg.KDTree(array, distance_metric='Arc', radius=radius)
else:
kdtree = pysal.cg.KDTree(array)
return knnW(kdtree, k=k, p=p, ids=ids)
def knnW_from_shapefile(shapefile, k=2, p=2, idVariable=None, radius=None):
"""
Nearest neighbor weights from a shapefile.
Parameters
----------
shapefile : string
shapefile name with shp suffix
k : int
number of nearest neighbors
p : float
Minkowski p-norm distance metric parameter:
1<=p<=infinity
2: Euclidean distance
1: Manhattan distance
idVariable : string
name of a column in the shapefile's DBF to use for ids
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
Returns
-------
w : W
instance; Weights object with binary weights
Examples
--------
Polygon shapefile
>>> wc=knnW_from_shapefile(pysal.examples.get_path("columbus.shp"))
>>> "%.4f"%wc.pct_nonzero
'4.0816'
>>> set([2,1]) == set(wc.neighbors[0])
True
>>> wc3=pysal.knnW_from_shapefile(pysal.examples.get_path("columbus.shp"),k=3)
>>> set(wc3.neighbors[0]) == set([2,1,3])
True
>>> set(wc3.neighbors[2]) == set([4,3,0])
True
1 offset rather than 0 offset
>>> wc3_1=knnW_from_shapefile(pysal.examples.get_path("columbus.shp"),k=3,idVariable="POLYID")
>>> set([4,3,2]) == set(wc3_1.neighbors[1])
True
>>> wc3_1.weights[2]
[1.0, 1.0, 1.0]
>>> set([4,1,8]) == set(wc3_1.neighbors[2])
True
Point shapefile
>>> w=knnW_from_shapefile(pysal.examples.get_path("juvenile.shp"))
>>> w.pct_nonzero
1.1904761904761905
>>> w1=knnW_from_shapefile(pysal.examples.get_path("juvenile.shp"),k=1)
>>> "%.3f"%w1.pct_nonzero
'0.595'
>>>
Notes
-----
Supports polygon or point shapefiles. For polygon shapefiles, distance is
based on polygon centroids. Distances are defined using coordinates in
shapefile which are assumed to be projected and not geographical
coordinates.
Ties between neighbors of equal distance are arbitrarily broken.
See Also
--------
:class:`pysal.weights.W`
"""
data = get_points_array_from_shapefile(shapefile)
if radius is not None:
kdtree = pysal.cg.KDTree(data, distance_metric='Arc', radius=radius)
else:
kdtree = pysal.cg.KDTree(data)
if idVariable:
ids = get_ids(shapefile, idVariable)
return knnW(kdtree, k=k, p=p, ids=ids)
return knnW(kdtree, k=k, p=p)
def threshold_binaryW_from_array(array, threshold, p=2, radius=None):
"""
Binary weights based on a distance threshold.
Parameters
----------
array : array
(n,m)
attribute data, n observations on m attributes
threshold : float
distance band
p : float
Minkowski p-norm distance metric parameter:
1<=p<=infinity
2: Euclidean distance
1: Manhattan distance
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
Returns
-------
w : W
instance
Weights object with binary weights
Examples
--------
>>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
>>> wcheck = pysal.W({0: [1, 3], 1: [0, 3, ], 2: [], 3: [1, 0], 4: [5], 5: [4]})
WARNING: there is one disconnected observation (no neighbors)
Island id: [2]
>>> w=threshold_binaryW_from_array(points,threshold=11.2)
WARNING: there is one disconnected observation (no neighbors)
Island id: [2]
>>> pysal.weights.util.neighbor_equality(w, wcheck)
True
>>>
"""
if radius is not None:
array = pysal.cg.KDTree(array, distance_metric='Arc', radius=radius)
return DistanceBand(array, threshold=threshold, p=p)
def threshold_binaryW_from_shapefile(shapefile, threshold, p=2, idVariable=None, radius=None):
"""
Threshold distance based binary weights from a shapefile.
Parameters
----------
shapefile : string
shapefile name with shp suffix
threshold : float
distance band
p : float
Minkowski p-norm distance metric parameter:
1<=p<=infinity
2: Euclidean distance
1: Manhattan distance
idVariable : string
name of a column in the shapefile's DBF to use for ids
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
Returns
-------
w : W
instance
Weights object with binary weights
Examples
--------
>>> w = threshold_binaryW_from_shapefile(pysal.examples.get_path("columbus.shp"),0.62,idVariable="POLYID")
>>> w.weights[1]
[1, 1]
Notes
-----
Supports polygon or point shapefiles. For polygon shapefiles, distance is
based on polygon centroids. Distances are defined using coordinates in
shapefile which are assumed to be projected and not geographical
coordinates.
"""
data = get_points_array_from_shapefile(shapefile)
if radius is not None:
data = pysal.cg.KDTree(data, distance_metric='Arc', radius=radius)
if idVariable:
ids = get_ids(shapefile, idVariable)
w = DistanceBand(data, threshold=threshold, p=p)
w.remap_ids(ids)
return w
return threshold_binaryW_from_array(data, threshold, p=p)
def threshold_continuousW_from_array(array, threshold, p=2,
alpha=-1, radius=None):
"""
Continuous weights based on a distance threshold.
Parameters
----------
array : array
(n,m)
attribute data, n observations on m attributes
threshold : float
distance band
p : float
Minkowski p-norm distance metric parameter:
1<=p<=infinity
2: Euclidean distance
1: Manhattan distance
alpha : float
distance decay parameter for weight (default -1.0)
if alpha is positive the weights will not decline with
distance.
    radius : float
             If supplied arc_distances will be calculated
             based on the given radius. p will be ignored.
Returns
-------
w : W
instance; Weights object with continuous weights.
Examples
--------
inverse distance weights
>>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
>>> wid=threshold_continuousW_from_array(points,11.2)
WARNING: there is one disconnected observation (no neighbors)
Island id: [2]
>>> wid.weights[0]
[0.10000000000000001, 0.089442719099991588]
gravity weights
>>> wid2=threshold_continuousW_from_array(points,11.2,alpha=-2.0)
WARNING: there is one disconnected observation (no neighbors)
Island id: [2]
>>> wid2.weights[0]
[0.01, 0.0079999999999999984]
"""
if radius is not None:
array = pysal.cg.KDTree(array, distance_metric='Arc', radius=radius)
w = DistanceBand(
array, threshold=threshold, p=p, alpha=alpha, binary=False)
return w
def threshold_continuousW_from_shapefile(shapefile, threshold, p=2,
alpha=-1, idVariable=None, radius=None):
"""
Threshold distance based continuous weights from a shapefile.
Parameters
----------
shapefile : string
shapefile name with shp suffix
threshold : float
distance band
p : float
Minkowski p-norm distance metric parameter:
1<=p<=infinity
2: Euclidean distance
1: Manhattan distance
alpha : float
distance decay parameter for weight (default -1.0)
if alpha is positive the weights will not decline with
distance.
idVariable : string
name of a column in the shapefile's DBF to use for ids
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
Returns
-------
w : W
instance; Weights object with continuous weights.
Examples
--------
>>> w = threshold_continuousW_from_shapefile(pysal.examples.get_path("columbus.shp"),0.62,idVariable="POLYID")
>>> w.weights[1]
[1.6702346893743334, 1.7250729841938093]
Notes
-----
Supports polygon or point shapefiles. For polygon shapefiles, distance is
based on polygon centroids. Distances are defined using coordinates in
shapefile which are assumed to be projected and not geographical
coordinates.
"""
data = get_points_array_from_shapefile(shapefile)
if radius is not None:
data = pysal.cg.KDTree(data, distance_metric='Arc', radius=radius)
if idVariable:
ids = get_ids(shapefile, idVariable)
w = DistanceBand(data, threshold=threshold, p=p, alpha=alpha, binary=False)
w.remap_ids(ids)
else:
w = threshold_continuousW_from_array(data, threshold, p=p, alpha=alpha)
w.set_shapefile(shapefile,idVariable)
return w
# Kernel Weights
def kernelW(points, k=2, function='triangular', fixed=True,
radius=None, diagonal=False):
"""
Kernel based weights.
Parameters
----------
points : array
(n,k)
n observations on k characteristics used to measure
distances between the n objects
k : int
the number of nearest neighbors to use for determining
bandwidth. Bandwidth taken as :math:`h_i=max(dknn) \\forall i`
where :math:`dknn` is a vector of k-nearest neighbor
distances (the distance to the kth nearest neighbor for each
observation).
function : {'triangular','uniform','quadratic','epanechnikov','quartic','bisquare','gaussian'}
.. math::
z_{i,j} = d_{i,j}/h_i
triangular
.. math::
K(z) = (1 - |z|) \ if |z| \le 1
uniform
.. math::
K(z) = |z| \ if |z| \le 1
quadratic
.. math::
K(z) = (3/4)(1-z^2) \ if |z| \le 1
epanechnikov
.. math::
K(z) = (1-z^2) \ if |z| \le 1
quartic
.. math::
K(z) = (15/16)(1-z^2)^2 \ if |z| \le 1
bisquare
.. math::
K(z) = (1-z^2)^2 \ if |z| \le 1
gaussian
.. math::
K(z) = (2\pi)^{(-1/2)} exp(-z^2 / 2)
fixed : boolean
If true then :math:`h_i=h \\forall i`. If false then
bandwidth is adaptive across observations.
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
diagonal : boolean
If true, set diagonal weights = 1.0, if false (
default) diagonal weights are set to value
according to kernel function.
Returns
-------
w : W
instance of spatial weights
Examples
--------
>>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
>>> kw=kernelW(points)
>>> kw.weights[0]
[1.0, 0.500000049999995, 0.4409830615267465]
>>> kw.neighbors[0]
[0, 1, 3]
>>> kw.bandwidth
array([[ 20.000002],
[ 20.000002],
[ 20.000002],
[ 20.000002],
[ 20.000002],
[ 20.000002]])
use different k
>>> kw=kernelW(points,k=3)
>>> kw.neighbors[0]
[0, 1, 3, 4]
>>> kw.bandwidth
array([[ 22.36068201],
[ 22.36068201],
[ 22.36068201],
[ 22.36068201],
[ 22.36068201],
[ 22.36068201]])
Diagonals to 1.0
>>> kq = kernelW(points,function='gaussian')
>>> kq.weights
{0: [0.3989422804014327, 0.35206533556593145, 0.3412334260702758], 1: [0.35206533556593145, 0.3989422804014327, 0.2419707487162134, 0.3412334260702758, 0.31069657591175387], 2: [0.2419707487162134, 0.3989422804014327, 0.31069657591175387], 3: [0.3412334260702758, 0.3412334260702758, 0.3989422804014327, 0.3011374490937829, 0.26575287272131043], 4: [0.31069657591175387, 0.31069657591175387, 0.3011374490937829, 0.3989422804014327, 0.35206533556593145], 5: [0.26575287272131043, 0.35206533556593145, 0.3989422804014327]}
>>> kqd = kernelW(points, function='gaussian', diagonal=True)
>>> kqd.weights
{0: [1.0, 0.35206533556593145, 0.3412334260702758], 1: [0.35206533556593145, 1.0, 0.2419707487162134, 0.3412334260702758, 0.31069657591175387], 2: [0.2419707487162134, 1.0, 0.31069657591175387], 3: [0.3412334260702758, 0.3412334260702758, 1.0, 0.3011374490937829, 0.26575287272131043], 4: [0.31069657591175387, 0.31069657591175387, 0.3011374490937829, 1.0, 0.35206533556593145], 5: [0.26575287272131043, 0.35206533556593145, 1.0]}
"""
if radius is not None:
points = pysal.cg.KDTree(points, distance_metric='Arc', radius=radius)
return Kernel(points, function=function, k=k, fixed=fixed,
diagonal=diagonal)
def kernelW_from_shapefile(shapefile, k=2, function='triangular',
idVariable=None, fixed=True, radius=None, diagonal=False):
"""
Kernel based weights.
Parameters
----------
shapefile : string
shapefile name with shp suffix
k : int
the number of nearest neighbors to use for determining
bandwidth. Bandwidth taken as :math:`h_i=max(dknn) \\forall i`
where :math:`dknn` is a vector of k-nearest neighbor
distances (the distance to the kth nearest neighbor for each
observation).
function : {'triangular','uniform','quadratic','epanechnikov', 'quartic','bisquare','gaussian'}
.. math::
z_{i,j} = d_{i,j}/h_i
triangular
.. math::
K(z) = (1 - |z|) \ if |z| \le 1
uniform
.. math::
K(z) = |z| \ if |z| \le 1
quadratic
.. math::
K(z) = (3/4)(1-z^2) \ if |z| \le 1
epanechnikov
.. math::
K(z) = (1-z^2) \ if |z| \le 1
quartic
.. math::
K(z) = (15/16)(1-z^2)^2 \ if |z| \le 1
bisquare
.. math::
K(z) = (1-z^2)^2 \ if |z| \le 1
gaussian
.. math::
K(z) = (2\pi)^{(-1/2)} exp(-z^2 / 2)
idVariable : string
name of a column in the shapefile's DBF to use for ids
    fixed : boolean
If true then :math:`h_i=h \\forall i`. If false then
bandwidth is adaptive across observations.
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
diagonal : boolean
If true, set diagonal weights = 1.0, if false (
default) diagonal weights are set to value
according to kernel function.
Returns
-------
w : W
instance of spatial weights
Examples
--------
>>> kw = pysal.kernelW_from_shapefile(pysal.examples.get_path("columbus.shp"),idVariable='POLYID', function = 'gaussian')
>>> kwd = pysal.kernelW_from_shapefile(pysal.examples.get_path("columbus.shp"),idVariable='POLYID', function = 'gaussian', diagonal = True)
>>> set(kw.neighbors[1]) == set([4, 2, 3, 1])
True
>>> set(kwd.neighbors[1]) == set([4, 2, 3, 1])
True
>>>
>>> set(kw.weights[1]) == set( [0.2436835517263174, 0.29090631630909874, 0.29671172124745776, 0.3989422804014327])
True
>>> set(kwd.weights[1]) == set( [0.2436835517263174, 0.29090631630909874, 0.29671172124745776, 1.0])
True
Notes
-----
Supports polygon or point shapefiles. For polygon shapefiles, distance is
based on polygon centroids. Distances are defined using coordinates in
shapefile which are assumed to be projected and not geographical
coordinates.
"""
points = get_points_array_from_shapefile(shapefile)
if radius is not None:
points = pysal.cg.KDTree(points, distance_metric='Arc', radius=radius)
if idVariable:
ids = get_ids(shapefile, idVariable)
return Kernel(points, function=function, k=k, ids=ids, fixed=fixed,
diagonal = diagonal)
return kernelW(points, k=k, function=function, fixed=fixed,
diagonal=diagonal)
def adaptive_kernelW(points, bandwidths=None, k=2, function='triangular',
radius=None, diagonal=False):
"""
Kernel weights with adaptive bandwidths.
Parameters
----------
points : array
(n,k)
n observations on k characteristics used to measure
distances between the n objects
bandwidths : float
or array-like (optional)
the bandwidth :math:`h_i` for the kernel.
if no bandwidth is specified k is used to determine the
adaptive bandwidth
k : int
the number of nearest neighbors to use for determining
bandwidth. For fixed bandwidth, :math:`h_i=max(dknn) \\forall i`
where :math:`dknn` is a vector of k-nearest neighbor
distances (the distance to the kth nearest neighbor for each
observation). For adaptive bandwidths, :math:`h_i=dknn_i`
function : {'triangular','uniform','quadratic','quartic','gaussian'}
kernel function defined as follows with
.. math::
z_{i,j} = d_{i,j}/h_i
triangular
.. math::
K(z) = (1 - |z|) \ if |z| \le 1
uniform
.. math::
K(z) = |z| \ if |z| \le 1
quadratic
.. math::
K(z) = (3/4)(1-z^2) \ if |z| \le 1
quartic
.. math::
K(z) = (15/16)(1-z^2)^2 \ if |z| \le 1
gaussian
.. math::
K(z) = (2\pi)^{(-1/2)} exp(-z^2 / 2)
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
diagonal : boolean
If true, set diagonal weights = 1.0, if false (
default) diagonal weights are set to value
according to kernel function.
Returns
-------
w : W
instance of spatial weights
Examples
--------
User specified bandwidths
>>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
>>> bw=[25.0,15.0,25.0,16.0,14.5,25.0]
>>> kwa=adaptive_kernelW(points,bandwidths=bw)
>>> kwa.weights[0]
[1.0, 0.6, 0.552786404500042, 0.10557280900008403]
>>> kwa.neighbors[0]
[0, 1, 3, 4]
>>> kwa.bandwidth
array([[ 25. ],
[ 15. ],
[ 25. ],
[ 16. ],
[ 14.5],
[ 25. ]])
Endogenous adaptive bandwidths
>>> kwea=adaptive_kernelW(points)
>>> kwea.weights[0]
[1.0, 0.10557289844279438, 9.99999900663795e-08]
>>> kwea.neighbors[0]
[0, 1, 3]
>>> kwea.bandwidth
array([[ 11.18034101],
[ 11.18034101],
[ 20.000002 ],
[ 11.18034101],
[ 14.14213704],
[ 18.02775818]])
Endogenous adaptive bandwidths with Gaussian kernel
>>> kweag=adaptive_kernelW(points,function='gaussian')
>>> kweag.weights[0]
[0.3989422804014327, 0.2674190291577696, 0.2419707487162134]
>>> kweag.bandwidth
array([[ 11.18034101],
[ 11.18034101],
[ 20.000002 ],
[ 11.18034101],
[ 14.14213704],
[ 18.02775818]])
with diagonal
>>> kweag = pysal.adaptive_kernelW(points, function='gaussian')
>>> kweagd = pysal.adaptive_kernelW(points, function='gaussian', diagonal=True)
>>> kweag.neighbors[0]
[0, 1, 3]
>>> kweagd.neighbors[0]
[0, 1, 3]
>>> kweag.weights[0]
[0.3989422804014327, 0.2674190291577696, 0.2419707487162134]
>>> kweagd.weights[0]
[1.0, 0.2674190291577696, 0.2419707487162134]
"""
if radius is not None:
points = pysal.cg.KDTree(points, distance_metric='Arc', radius=radius)
return Kernel(points, bandwidth=bandwidths, fixed=False, k=k,
function=function, diagonal=diagonal)
def adaptive_kernelW_from_shapefile(shapefile, bandwidths=None, k=2, function='triangular',
idVariable=None, radius=None,
diagonal = False):
"""
Kernel weights with adaptive bandwidths.
Parameters
----------
shapefile : string
shapefile name with shp suffix
bandwidths : float
or array-like (optional)
the bandwidth :math:`h_i` for the kernel.
if no bandwidth is specified k is used to determine the
adaptive bandwidth
k : int
the number of nearest neighbors to use for determining
bandwidth. For fixed bandwidth, :math:`h_i=max(dknn) \\forall i`
where :math:`dknn` is a vector of k-nearest neighbor
distances (the distance to the kth nearest neighbor for each
observation). For adaptive bandwidths, :math:`h_i=dknn_i`
function : {'triangular','uniform','quadratic','quartic','gaussian'}
kernel function defined as follows with
.. math::
z_{i,j} = d_{i,j}/h_i
triangular
.. math::
K(z) = (1 - |z|) \ if |z| \le 1
uniform
.. math::
K(z) = |z| \ if |z| \le 1
quadratic
.. math::
K(z) = (3/4)(1-z^2) \ if |z| \le 1
quartic
.. math::
K(z) = (15/16)(1-z^2)^2 \ if |z| \le 1
gaussian
.. math::
K(z) = (2\pi)^{(-1/2)} exp(-z^2 / 2)
idVariable : string
name of a column in the shapefile's DBF to use for ids
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
diagonal : boolean
If true, set diagonal weights = 1.0, if false (
default) diagonal weights are set to value
according to kernel function.
Returns
-------
w : W
instance of spatial weights
Examples
--------
>>> kwa = pysal.adaptive_kernelW_from_shapefile(pysal.examples.get_path("columbus.shp"), function='gaussian')
>>> kwad = pysal.adaptive_kernelW_from_shapefile(pysal.examples.get_path("columbus.shp"), function='gaussian', diagonal=True)
>>> kwa.neighbors[0]
[0, 2, 1]
>>> kwad.neighbors[0]
[0, 2, 1]
>>> kwa.weights[0]
[0.3989422804014327, 0.24966013701844503, 0.2419707487162134]
>>> kwad.weights[0]
[1.0, 0.24966013701844503, 0.2419707487162134]
Notes
-----
Supports polygon or point shapefiles. For polygon shapefiles, distance is
based on polygon centroids. Distances are defined using coordinates in
shapefile which are assumed to be projected and not geographical
coordinates.
"""
points = get_points_array_from_shapefile(shapefile)
if radius is not None:
points = pysal.cg.KDTree(points, distance_metric='Arc', radius=radius)
if idVariable:
ids = get_ids(shapefile, idVariable)
return Kernel(points, bandwidth=bandwidths, fixed=False, k=k,
function=function, ids=ids, diagonal=diagonal)
return adaptive_kernelW(points, bandwidths=bandwidths, k=k,
function=function, diagonal=diagonal)
def min_threshold_dist_from_shapefile(shapefile, radius=None, p=2):
"""
    Get the maximum nearest neighbor distance between observations in the shapefile.
Parameters
----------
shapefile : string
shapefile name with shp suffix.
radius : float
If supplied arc_distances will be calculated
based on the given radius. p will be ignored.
p : float
Minkowski p-norm distance metric parameter:
1<=p<=infinity
2: Euclidean distance
1: Manhattan distance
Returns
-------
d : float
Maximum nearest neighbor distance between the n
observations.
Examples
--------
>>> md = min_threshold_dist_from_shapefile(pysal.examples.get_path("columbus.shp"))
>>> md
0.61886415807685413
>>> min_threshold_dist_from_shapefile(pysal.examples.get_path("stl_hom.shp"), pysal.cg.sphere.RADIUS_EARTH_MILES)
31.846942936393717
Notes
-----
Supports polygon or point shapefiles. For polygon shapefiles, distance is
based on polygon centroids. Distances are defined using coordinates in
shapefile which are assumed to be projected and not geographical
coordinates.
"""
points = get_points_array_from_shapefile(shapefile)
if radius is not None:
kdt = pysal.cg.kdtree.Arc_KDTree(points, radius=radius)
nn = kdt.query(kdt.data, k=2)
nnd = nn[0].max(axis=0)[1]
return nnd
return min_threshold_distance(points, p)
def build_lattice_shapefile(nrows, ncols, outFileName):
"""
Build a lattice shapefile with nrows rows and ncols cols.
Parameters
----------
nrows : int
Number of rows
ncols : int
Number of cols
outFileName : str
shapefile name with shp suffix
Returns
-------
None
"""
if not outFileName.endswith('.shp'):
raise ValueError("outFileName must end with .shp")
o = pysal.open(outFileName, 'w')
dbf_name = outFileName.split(".")[0] + ".dbf"
d = pysal.open(dbf_name, 'w')
d.header = [ 'ID' ]
d.field_spec = [ ('N', 8, 0) ]
c = 0
for i in xrange(nrows):
for j in xrange(ncols):
ll = i, j
ul = i, j + 1
ur = i + 1, j + 1
lr = i + 1, j
o.write(pysal.cg.Polygon([ll, ul, ur, lr, ll]))
d.write([c])
c += 1
d.close()
o.close()
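# Minimal usage sketch (the output name "lattice.shp" is hypothetical and assumes
# write access to the working directory):
#
#     build_lattice_shapefile(3, 3, "lattice.shp")
#     w = rook_from_shapefile("lattice.shp")   # 9 cells, rook contiguity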
def _test():
import doctest
# the following line could be used to define an alternative to the '<BLANKLINE>' flag
#doctest.BLANKLINE_MARKER = 'something better than <BLANKLINE>'
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
_test()
|
jlaura/pysal
|
pysal/weights/user.py
|
Python
|
bsd-3-clause
| 34,513
|
[
"COLUMBUS",
"Gaussian"
] |
0141594bb49f37cdada2a54866e3c3491169c2ab65f5f9f78024c561f95cc362
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Simulate a Lennard-Jones fluid maintained at a fixed temperature
by a Langevin thermostat. Shows the basic features of how to:
* set up system parameters, particles and interactions.
* warm up and integrate.
* write parameters, configurations and observables to files.
The particles in the system are of two types: type 0 and type 1.
Type 0 particles interact with each other via a repulsive WCA
interaction. Type 1 particles neither interact with themselves
nor with type 0 particles.
"""
import numpy as np
import espressomd
required_features = ["LENNARD_JONES"]
espressomd.assert_features(required_features)
print("""
=======================================================
= lj_liquid.py =
=======================================================
""")
# System parameters
#############################################################
box_l = 10.7437
density = 0.7
# Interaction parameters (repulsive Lennard-Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 2.5 * lj_sig
# Integration parameters
#############################################################
system = espressomd.System(box_l=[box_l] * 3)
np.random.seed(seed=42)
system.time_step = 0.01
system.cell_system.skin = 0.4
# warmup integration (steepest descent)
warm_steps = 20
warm_n_times = 10
# convergence criterion (particles are separated by at least 90% sigma)
min_dist = 0.9 * lj_sig
# integration
int_steps = 1000
int_n_times = 5
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto")
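# shift="auto" offsets the potential so that it is exactly zero at the cutoff
# distance lj_cut, avoiding an energy jump when a pair crosses the cutoff.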
print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].lennard_jones.get_params())
# Particle setup
#############################################################
volume = box_l**3
n_part = int(volume * density)
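# With the parameters above this gives int(10.7437**3 * 0.7), i.e. roughly
# int(1240.1 * 0.7) = 868 particles.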
for i in range(n_part):
system.part.add(id=i, pos=np.random.random(3) * system.box_l)
print("Simulate {} particles in a cubic box of length {} at density {}."
.format(n_part, box_l, density).strip())
print("Interactions:\n")
act_min_dist = system.analysis.min_dist()
print("Start with minimal distance {}".format(act_min_dist))
#############################################################
# Warmup Integration #
#############################################################
print("""
Start warmup integration:
At maximum {} times {} steps
Stop if minimal distance is larger than {}
""".strip().format(warm_n_times, warm_steps, min_dist))
print(system.non_bonded_inter[0, 0].lennard_jones)
# minimize energy using min_dist as the convergence criterion
system.integrator.set_steepest_descent(f_max=0, gamma=1e-3,
max_displacement=lj_sig / 100)
i = 0
while i < warm_n_times and system.analysis.min_dist() < min_dist:
print("minimization: {:+.2e}".format(system.analysis.energy()["total"]))
system.integrator.run(warm_steps)
i += 1
print("minimization: {:+.2e}".format(system.analysis.energy()["total"]))
print()
system.integrator.set_vv()
# activate thermostat
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
# Just to see what else we may get from the C++ core
import pprint
pprint.pprint(system.cell_system.get_state(), width=1)
# pprint.pprint(system.part.__getstate__(), width=1)
pprint.pprint(system.__getstate__())
#############################################################
# Integration #
#############################################################
print("\nStart integration: run {} times {} steps"
.format(int_n_times, int_steps))
for i in range(int_n_times):
print("run {} at time={:.2f}".format(i, system.time))
system.integrator.run(steps=int_steps)
energies = system.analysis.energy()
print(energies['total'])
linear_momentum = system.analysis.linear_momentum()
print(linear_momentum)
# terminate program
print("\nFinished.")
|
KaiSzuttor/espresso
|
samples/lj_liquid.py
|
Python
|
gpl-3.0
| 4,979
|
[
"ESPResSo"
] |
c7a0ed224cb7e80cfb38f01eb75b92ec0fe0864faef0af7ed5188637c774b11f
|
from django.conf.urls import url
from crystal_dashboard.dashboards.crystal.rings.storage_policies import views
urlpatterns = [
url(r'^create_storage_policy', views.CreateStoragePolicy.as_view(),
name='create_storage_policy'),
url(r'^update_storage_policy/(?P<id>[^/]+)/$', views.UpdateStoragePolicy.as_view(), name='update_storage_policy'),
url(r'^create_ec_storage_policy', views.CreateECStoragePolicy.as_view(),
name='create_ec_storage_policy'),
url(r'^(?P<policy_id>[^/]+)/devices/$', views.ManageDisksView.as_view(),
name='devices'),
url(r'^(?P<policy_id>[^/]+)/add_devices/$', views.AddDisksView.as_view(),
name='add_devices'),
]
|
Crystal-SDS/dashboard
|
crystal_dashboard/dashboards/crystal/rings/storage_policies/urls.py
|
Python
|
gpl-3.0
| 688
|
[
"CRYSTAL"
] |
ae37c1749f9b3072b138688ceca677c0963ab068010e5117dd0e2545cd8971a6
|
#! CCSD dipole with user-specified basis set
import psi4
psi4.set_output_file("output.dat", False)
h2o = psi4.geometry("""
0 1
H
O 1 0.957
H 2 0.957 1 104.5
""")
psi4.set_options({'freeze_core': 'false'})
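# With freeze_core set to false, no orbitals are frozen, so the oxygen 1s core is
# correlated in the CCSD and property calculations below.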
psi4.basis_helper("""
# Sadlej-pVTZ
spherical
****
H 0
S 4 1.00
33.8650140000 0.0060680000
5.0947880000 0.0453160000
1.1587860000 0.2028460000
0.3258400000 0.5037090000
S 1 1.00
0.1027410000 1.0000000000
S 1 1.00
0.0324000000 1.0000000000
P 2 1.00
1.1588000000 0.1884400000
0.3258000000 0.8824200000
P 2 1.00
0.1027000000 0.1178000000
0.0324000000 0.0042000000
****
C 0
S 5 1.00
5240.6353000000 0.0009370000
782.2048000000 0.0072280000
178.3508300000 0.0363440000
50.8159420000 0.1306000000
16.8235620000 0.3189310000
S 2 1.00
6.1757760000 0.4387420000
2.4180490000 0.2149740000
S 1 1.00
0.5119000000 1.0000000000
S 1 1.00
0.1565900000 1.0000000000
S 1 1.00
0.0479000000 1.0000000000
P 4 1.00
18.8418000000 0.0138870000
4.1592400000 0.0862790000
1.2067100000 0.2887440000
0.3855400000 0.4994110000
P 1 1.00
0.1219400000 1.0000000000
P 1 1.00
0.0385680000 1.0000000000
D 2 1.00
1.2067000000 0.2628500000
0.3855000000 0.8043000000
D 2 1.00
0.1219000000 0.6535000000
0.0386000000 0.8636000000
****
O 0
S 5 1.00
10662.2850000000 0.0007990000
1599.7097000000 0.0061530000
364.7252600000 0.0311570000
103.6517900000 0.1155960000
33.9058050000 0.3015520000
S 2 1.00
12.2874690000 0.4448700000
4.7568050000 0.2431720000
S 1 1.00
1.0042710000 1.0000000000
S 1 1.00
0.3006860000 1.0000000000
S 1 1.00
0.0900300000 1.0000000000
P 4 1.00
34.8564630000 0.0156480000
7.8431310000 0.0981970000
2.3062490000 0.3077680000
0.7231640000 0.4924700000
P 1 1.00
0.2148820000 1.0000000000
P 1 1.00
0.0638500000 1.0000000000
D 2 1.00
2.3062000000 0.2027000000
0.7232000000 0.5791000000
D 2 1.00
0.2149000000 0.7854500000
0.0639000000 0.5338700000
****
""")
ccsd_e, wfn = psi4.properties('ccsd',properties=['dipole'],return_wfn=True)
psi4.oeprop(wfn,"DIPOLE", "QUADRUPOLE", title="(OEPROP)CC")
psi4.compare_values(psi4.get_variable("(OEPROP)CC DIPOLE X"), 0.000000000000,6,"CC DIPOLE X") #TEST
psi4.compare_values(psi4.get_variable("(OEPROP)CC DIPOLE Y"), 0.000000000000,6,"CC DIPOLE Y") #TEST
psi4.compare_values(psi4.get_variable("(OEPROP)CC DIPOLE Z"),-1.840334899884,6,"CC DIPOLE Z") #TEST
psi4.compare_values(psi4.get_variable("(OEPROP)CC QUADRUPOLE XX"),-7.864006962064,6,"CC QUADRUPOLE XX") #TEST
psi4.compare_values(psi4.get_variable("(OEPROP)CC QUADRUPOLE XY"), 0.000000000000,6,"CC QUADRUPOLE XY") #TEST
psi4.compare_values(psi4.get_variable("(OEPROP)CC QUADRUPOLE XZ"), 0.000000000000,6,"CC QUADRUPOLE XZ") #TEST
psi4.compare_values(psi4.get_variable("(OEPROP)CC QUADRUPOLE YY"),-4.537386915305,6,"CC QUADRUPOLE YY") #TEST
psi4.compare_values(psi4.get_variable("(OEPROP)CC QUADRUPOLE YZ"), 0.000000000000,6,"CC QUADRUPOLE YZ") #TEST
psi4.compare_values(psi4.get_variable("(OEPROP)CC QUADRUPOLE ZZ"),-6.325836255265,6,"CC QUADRUPOLE ZZ") #TEST
psi4.core.print_variables()
|
amjames/psi4
|
tests/python/cc54/input.py
|
Python
|
lgpl-3.0
| 3,948
|
[
"Psi4"
] |
af7df7a44ec2071ab828fca3559d3b26639d121dad0def4282635ac97010e2ba
|
# SONA imports
from .generators.noise import PulseGenerator
from .generators.noise import NoiseGenerator
from .generators.noise import SineOscillator
from .generators.noise import ColoredNoise
from .generators.operators import Product
from .interface import play
from .interface import Player
from . import macros
# Numpy/Scipy imports
from scipy.signal import gaussian
|
gpenazzi/sona
|
src/sona/__init__.py
|
Python
|
bsd-3-clause
| 371
|
[
"Gaussian"
] |
1c385891d8cf87a1b2ff36b205fa956c0729625c060e58b179d181b4c035ef85
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
from pyscf.lib.parameters import BOHR
unknown = 1.999999
#########################
# JCP 41 3199 (1964).
BRAGG = 1/BOHR * numpy.array((unknown, # Ghost atom
0.35, 1.40, # 1s
1.45, 1.05, 0.85, 0.70, 0.65, 0.60, 0.50, 1.50, # 2s2p
1.80, 1.50, 1.25, 1.10, 1.00, 1.00, 1.00, 1.80, # 3s3p
2.20, 1.80, # 4s
1.60, 1.40, 1.35, 1.40, 1.40, 1.40, 1.35, 1.35, 1.35, 1.35, # 3d
1.30, 1.25, 1.15, 1.15, 1.15, 1.90, # 4p
2.35, 2.00, # 5s
1.80, 1.55, 1.45, 1.45, 1.35, 1.30, 1.35, 1.40, 1.60, 1.55, # 4d
1.55, 1.45, 1.45, 1.40, 1.40, 2.10, # 5p
2.60, 2.15, # 6s
1.95, 1.85, 1.85, 1.85, 1.85, 1.85, 1.85, # La, Ce-Eu
1.80, 1.75, 1.75, 1.75, 1.75, 1.75, 1.75, 1.75, # Gd, Tb-Lu
1.55, 1.45, 1.35, 1.35, 1.30, 1.35, 1.35, 1.35, 1.50, # 5d
1.90, 1.80, 1.60, 1.90, 1.45, 2.10, # 6p
1.80, 2.15, # 7s
1.95, 1.80, 1.80, 1.75, 1.75, 1.75, 1.75,
1.75, 1.75, 1.75, 1.75, 1.75, 1.75, 1.75,
1.75, 1.75, 1.75, 1.75, 1.75, 1.75, 1.75, 1.75, 1.75, 1.75,
1.75, 1.75, 1.75, 1.75, 1.75, 1.75,
1.75, 1.75,
1.75, 1.75, 1.75, 1.75, 1.75, 1.75, 1.75, 1.75, 1.75, 1.75))
# from Gerald Knizia's CtDftGrid, which is based on
# http://en.wikipedia.org/wiki/Covalent_radius
# and
# Beatriz Cordero, Veronica Gomez, Ana E. Platero-Prats, Marc Reves,
# Jorge Echeverria, Eduard Cremades, Flavia Barragan and Santiago
# Alvarez. Covalent radii revisited. Dalton Trans., 2008, 2832-2838,
# doi:10.1039/b801115j
COVALENT = 1/BOHR * numpy.array((unknown, # Ghost atom
0.31, 0.28, # 1s
1.28, 0.96, 0.84, 0.73, 0.71, 0.66, 0.57, 0.58, # 2s2p
1.66, 1.41, 1.21, 1.11, 1.07, 1.05, 1.02, 1.06, # 3s3p
2.03, 1.76, # 4s
1.70, 1.60, 1.53, 1.39, 1.50, 1.42, 1.38, 1.24, 1.32, 1.22, # 3d
1.22, 1.20, 1.19, 1.20, 1.20, 1.16, # 4p
2.20, 1.95, # 5s
1.90, 1.75, 1.64, 1.54, 1.47, 1.46, 1.42, 1.39, 1.45, 1.44, # 4d
1.42, 1.39, 1.39, 1.38, 1.39, 1.40, # 5p
2.44, 2.15, # 6s
2.07, 2.04, 2.03, 2.01, 1.99, 1.98, 1.98, # La, Ce-Eu
1.96, 1.94, 1.92, 1.92, 1.89, 1.90, 1.87, 1.87, # Gd, Tb-Lu
1.75, 1.70, 1.62, 1.51, 1.44, 1.41, 1.36, 1.36, 1.32, # 5d
1.45, 1.46, 1.48, 1.40, 1.50, 1.50, # 6p
2.60, 2.21, # 7s
2.15, 2.06, 2.00, 1.96, 1.90, 1.87, 1.80, 1.69))
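# For example, COVALENT[6] is the covalent radius of carbon: the tabulated
# 0.73 Angstrom divided by BOHR (about 0.529 Angstrom) is roughly 1.38 Bohr.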
#
# vdw from ASE
#
# Van der Waals radii in [A] taken from
# http://www.webelements.com/periodicity/van_der_waals_radius/
# and the references given there.
# Additional source 5 from http://de.wikipedia.org/wiki/Van-der-Waals-Radius
#
# 1. A. Bondi, J. Phys. Chem., 1964, 68, 441.
#
# 2. L. Pauling, The Nature of the Chemical Bond,
# Cornell University Press, USA, 1945.
#
# 3. J.E. Huheey, E.A. Keiter, and R.L. Keiter in Inorganic Chemistry
# Principles of Structure and Reactivity, 4th edition, HarperCollins,
# New York, USA, 1993.W.W. Porterfield in Inorganic chemistry,
# a unified approach, Addison Wesley Publishing Co.,
# Reading Massachusetts, USA, 1984.
#
# 4. A.M. James and M.P. Lord in Macmillan's Chemical and Physical Data,
# Macmillan, London, UK, 1992.
#
# 5. Manjeera Mantina, Adam C. Chamberlin, Rosendo Valero,
# Christopher J. Cramer, Donald G. Truhlar Consistent van der Waals Radii
# for the Whole Main Group. In J. Phys. Chem. A. 2009, 113, 5806-5812,
# doi:10.1021/jp8111556
VDW = 1/BOHR * numpy.array((unknown, # Ghost atom
1.20, # 1 H
1.40, # 2 He [1]
1.82, # 3 Li [1]
1.53, # 4 Be [5]
1.92, # 5 B [5]
1.70, # 6 C [1]
1.55, # 7 N [1]
1.52, # 8 O [1]
1.47, # 9 F [1]
1.54, # 10 Ne [1]
2.27, # 11 Na [1]
1.73, # 12 Mg [1]
1.84, # 13 Al [5]
2.10, # 14 Si [1]
1.80, # 15 P [1]
1.80, # 16 S [1]
1.75, # 17 Cl [1]
1.88, # 18 Ar [1]
2.75, # 19 K [1]
2.31, # 20 Ca [5]
unknown, # 21 Sc
unknown, # 22 Ti
unknown, # 23 V
unknown, # 24 Cr
unknown, # 25 Mn
unknown, # 26 Fe
unknown, # 27 Co
1.63, # 28 Ni [1]
1.40, # 29 Cu [1]
1.39, # 30 Zn [1]
1.87, # 31 Ga [1]
2.11, # 32 Ge [5]
1.85, # 33 As [1]
1.90, # 34 Se [1]
1.85, # 35 Br [1]
2.02, # 36 Kr [1]
3.03, # 37 Rb [5]
2.49, # 38 Sr [5]
unknown, # 39 Y
unknown, # 40 Zr
unknown, # 41 Nb
unknown, # 42 Mo
unknown, # 43 Tc
unknown, # 44 Ru
unknown, # 45 Rh
1.63, # 46 Pd [1]
1.72, # 47 Ag [1]
1.58, # 48 Cd [1]
1.93, # 49 In [1]
2.17, # 50 Sn [1]
2.06, # 51 Sb [5]
2.06, # 52 Te [1]
1.98, # 53 I [1]
2.16, # 54 Xe [1]
3.43, # 55 Cs [5]
2.49, # 56 Ba [5]
unknown, # 57 La
unknown, # 58 Ce
unknown, # 59 Pr
unknown, # 60 Nd
unknown, # 61 Pm
unknown, # 62 Sm
unknown, # 63 Eu
unknown, # 64 Gd
unknown, # 65 Tb
unknown, # 66 Dy
unknown, # 67 Ho
unknown, # 68 Er
unknown, # 69 Tm
unknown, # 70 Yb
unknown, # 71 Lu
unknown, # 72 Hf
unknown, # 73 Ta
unknown, # 74 W
unknown, # 75 Re
unknown, # 76 Os
unknown, # 77 Ir
1.75, # 78 Pt [1]
1.66, # 79 Au [1]
1.55, # 80 Hg [1]
1.96, # 81 Tl [1]
2.02, # 82 Pb [1]
2.07, # 83 Bi [5]
1.97, # 84 Po [5]
2.02, # 85 At [5]
2.20, # 86 Rn [5]
3.48, # 87 Fr [5]
2.83, # 88 Ra [5]
unknown, # 89 Ac
unknown, # 90 Th
unknown, # 91 Pa
1.86, # 92 U [1]
unknown, # 93 Np
unknown, # 94 Pu
unknown, # 95 Am
unknown, # 96 Cm
unknown, # 97 Bk
unknown, # 98 Cf
unknown, # 99 Es
unknown, #100 Fm
unknown, #101 Md
unknown, #102 No
unknown, #103 Lr
))
# Universal Force Field (UFF)
# J. Am. Chem. Soc., 1992, 114 (25), pp 10024-10035
UFF = 1/BOHR * numpy.array((unknown, # Ghost atom
1.4430, # 1 H
1.8100, # 2 He
1.2255, # 3 Li
1.3725, # 4 Be
2.0415, # 5 B
1.9255, # 6 C
1.8300, # 7 N
1.7500, # 8 O
1.6820, # 9 F
1.6215, # 10 Ne
1.4915, # 11 Na
1.5105, # 12 Mg
2.2495, # 13 Al
2.1475, # 14 Si
2.0735, # 15 P
2.0175, # 16 S
1.9735, # 17 Cl
1.9340, # 18 Ar
1.9060, # 19 K
1.6995, # 20 Ca
1.6475, # 21 Sc
1.5875, # 22 Ti
1.5720, # 23 V
1.5115, # 24 Cr
1.4805, # 25 Mn
1.4560, # 26 Fe
1.4360, # 27 Co
1.4170, # 28 Ni
1.7475, # 29 Cu
1.3815, # 30 Zn
2.1915, # 31 Ga
2.1400, # 32 Ge
2.1150, # 33 As
2.1025, # 34 Se
2.0945, # 35 Br
2.0705, # 36 Kr
2.0570, # 37 Rb
1.8205, # 38 Sr
1.6725, # 39 Y
1.5620, # 40 Zr
1.5825, # 41 Nb
1.5260, # 42 Mo
1.4990, # 43 Tc
1.4815, # 44 Ru
1.4645, # 45 Rh
1.4495, # 46 Pd
1.5740, # 47 Ag
1.4240, # 48 Cd
2.2315, # 49 In
2.1960, # 50 Sn
2.2100, # 51 Sb
2.2350, # 52 Te
2.2500, # 53 I
2.2020, # 54 Xe
2.2585, # 55 Cs
1.8515, # 56 Ba
1.7610, # 57 La
1.7780, # 58 Ce
1.8030, # 59 Pr
1.7875, # 60 Nd
1.7735, # 61 Pm
1.7600, # 62 Sm
1.7465, # 63 Eu
1.6840, # 64 Gd
1.7255, # 65 Tb
1.7140, # 66 Dy
1.7045, # 67 Ho
1.6955, # 68 Er
1.6870, # 69 Tm
1.6775, # 70 Yb
1.8200, # 71 Lu
1.5705, # 72 Hf
1.5850, # 73 Ta
1.5345, # 74 W
1.4770, # 75 Re
1.5600, # 76 Os
1.4200, # 77 Ir
1.3770, # 78 Pt
1.6465, # 79 Au
1.3525, # 80 Hg
2.1735, # 81 Tl
2.1485, # 82 Pb
2.1850, # 83 Bi
2.3545, # 84 Po
2.3750, # 85 At
2.3825, # 86 Rn
2.4500, # 87 Fr
1.8385, # 88 Ra
1.7390, # 89 Ac
1.6980, # 90 Th
1.7120, # 91 Pa
1.6975, # 92 U
1.7120, # 93 Np
1.7120, # 94 Pu
1.6905, # 95 Am
1.6630, # 96 Cm
1.6695, # 97 Bk
1.6565, # 98 Cf
1.6495, # 99 Es
1.6430, #100 Fm
1.6370, #101 Md
1.6240, #102 No
1.6180, #103 Lr
unknown, #104 Rf
unknown, #105 Db
unknown, #106 Sg
unknown, #107 Bh
unknown, #108 Hs
unknown, #109 Mt
unknown, #110 Ds
unknown, #111 Rg
unknown, #112 Cn
unknown, #113 Nh
unknown, #114 Fl
unknown, #115 Mc
unknown, #116 Lv
unknown, #117 Ts
unknown, #118 Og
))
# Allinger's MM3 radii
# From http://pcmsolver.readthedocs.io/en/latest/users/input.html
MM3 = 1/BOHR * numpy.array((unknown, # Ghost atom
1.62, # 1 H
1.53, # 2 He
2.55, # 3 Li
2.23, # 4 Be
2.15, # 5 B
2.04, # 6 C
1.93, # 7 N
1.82, # 8 O
1.71, # 9 F
1.60, # 10 Ne
2.70, # 11 Na
2.43, # 12 Mg
2.36, # 13 Al
2.29, # 14 Si
2.22, # 15 P
2.15, # 16 S
2.07, # 17 Cl
1.99, # 18 Ar
3.09, # 19 K
2.81, # 20 Ca
2.61, # 21 Sc
2.39, # 22 Ti
2.29, # 23 V
2.25, # 24 Cr
2.24, # 25 Mn
2.23, # 26 Fe
2.23, # 27 Co
2.22, # 28 Ni
2.26, # 29 Cu
2.29, # 30 Zn
2.46, # 31 Ga
2.44, # 32 Ge
2.36, # 33 As
2.29, # 34 Se
2.22, # 35 Br
2.15, # 36 Kr
3.25, # 37 Rb
3.00, # 38 Sr
2.71, # 39 Y
2.54, # 40 Zr
2.43, # 41 Nb
2.39, # 42 Mo
2.36, # 43 Tc
2.34, # 44 Ru
2.34, # 45 Rh
2.37, # 46 Pd
2.43, # 47 Ag
2.50, # 48 Cd
2.64, # 49 In
2.59, # 50 Sn
2.52, # 51 Sb
2.44, # 52 Te
2.36, # 53 I
2.28, # 54 Xe
3.44, # 55 Cs
3.07, # 56 Ba
2.78, # 57 La
2.74, # 58 Ce
2.73, # 59 Pr
2.73, # 60 Nd
2.72, # 61 Pm
2.71, # 62 Sm
2.94, # 63 Eu
2.71, # 64 Gd
2.70, # 65 Tb
2.69, # 66 Dy
2.67, # 67 Ho
2.67, # 68 Er
2.67, # 69 Tm
2.79, # 70 Yb
2.65, # 71 Lu
2.53, # 72 Hf
2.43, # 73 Ta
2.39, # 74 W
2.37, # 75 Re
2.35, # 76 Os
2.36, # 77 Ir
2.39, # 78 Pt
2.43, # 79 Au
2.53, # 80 Hg
2.59, # 81 Tl
2.74, # 82 Pb
2.66, # 83 Bi
2.59, # 84 Po
2.51, # 85 At
2.43, # 86 Rn
3.64, # 87 Fr
3.27, # 88 Ra
3.08, # 89 Ac
2.74, # 90 Th
2.64, # 91 Pa
2.52, # 92 U
2.52, # 93 Np
2.52, # 94 Pu
unknown, # 95 Am
unknown, # 96 Cm
unknown, # 97 Bk
unknown, # 98 Cf
unknown, # 99 Es
unknown, #100 Fm
unknown, #101 Md
unknown, #102 No
unknown, #103 Lr
2.73, #104 Rf
2.63, #105 Db
unknown, #106 Sg
1.62, #107 Bh
unknown, #108 Hs
unknown, #109 Mt
unknown, #110 Ds
unknown, #111 Rg
unknown, #112 Cn
unknown, #113 Nh
unknown, #114 Fl
unknown, #115 Mc
unknown, #116 Lv
unknown, #117 Ts
unknown, #118 Og
))
del unknown
|
gkc1000/pyscf
|
pyscf/data/radii.py
|
Python
|
apache-2.0
| 13,757
|
[
"ASE",
"Dalton",
"PySCF"
] |
d663e77db9530056e0af426970ee6324025851aff3d200d4b751ca7d1a278971
|
#! /usr/bin/env python
#
# Copyright (C) 2011, 2012, 2014, 2015, 2016, 2017 David Maxwell and Constantine Khroulev
#
# This file is part of PISM.
#
# PISM is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# PISM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import PISM
from petsc4py import PETSc
import os
context = PISM.Context()
ctx = context.ctx
config = context.config
PISM.set_abort_on_sigint(True)
usage = """\
sia_forward.py -i IN.nc [-o file.nc]
where:
-i IN.nc is input file in NetCDF format: contains PISM-written model state
notes:
* -i is required
"""
PISM.show_usage_check_req_opts(ctx.log(), "sia_forward.py", ["-i"], usage)
input_filename, input_set = PISM.optionsStringWasSet("-i", "input file")
if not input_set:
import sys
sys.exit(1)
output_file = PISM.optionsString("-o", "output file",
default="sia_" + os.path.basename(input_filename))
is_regional = PISM.optionsFlag("-regional",
"Compute SIA using regional model semantics", default=False)
verbosity = PISM.optionsInt("-verbose", "verbosity level", default=2)
registration = PISM.CELL_CENTER
if is_regional:
registration = PISM.CELL_CORNER
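# Example invocation (file names are hypothetical):
#   python sia_forward.py -i pism_state.nc -o sia_velocities.nc -regional
# The -regional flag switches to corner registration here and selects the
# regional SIA solver further below.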
input_file = PISM.PIO(ctx.com(), "netcdf3", input_filename, PISM.PISM_READONLY)
grid = PISM.IceGrid.FromFile(ctx, input_file, "enthalpy", registration)
config.set_boolean("basal_resistance.pseudo_plastic.enabled", False)
enthalpyconverter = PISM.EnthalpyConverter(config)
modeldata = PISM.model.ModelData(grid)
modeldata.setPhysics(enthalpyconverter)
vecs = modeldata.vecs
vecs.add(PISM.model.createIceSurfaceVec(grid))
vecs.add(PISM.model.createIceThicknessVec(grid))
vecs.add(PISM.model.createBedrockElevationVec(grid))
vecs.add(PISM.model.createEnthalpyVec(grid))
vecs.add(PISM.model.createIceMaskVec(grid))
# Read in the PISM state variables that are used directly in the SSA solver
for v in [vecs.thk, vecs.topg, vecs.enthalpy]:
v.regrid(input_file, critical=True)
# variables mask and surface are computed from the geometry previously read
sea_level = 0 # FIXME setFromOption?
gc = PISM.GeometryCalculator(config)
gc.compute(sea_level, vecs.topg, vecs.thk, vecs.mask, vecs.surface_altitude)
# If running in regional mode, load in regional variables
if is_regional:
vecs.add(PISM.model.createNoModelMask(grid))
vecs.no_model_mask.regrid(input_file, critical=True)
if PISM.util.fileHasVariable(input_file, 'usurfstore'):
vecs.add(PISM.model.createIceSurfaceStoreVec(grid))
vecs.usurfstore.regrid(input_file, critical=True)
else:
vecs.add(vecs.surface, 'usurfstore')
solver = PISM.SIAFD_Regional
else:
solver = PISM.SIAFD
PISM.verbPrintf(2, context.com, "* Computing SIA velocities...\n")
vel_sia = PISM.sia.computeSIASurfaceVelocities(modeldata, siasolver=solver)
PISM.verbPrintf(2, context.com, "* Saving results to %s...\n" % output_file)
pio = PISM.util.prepare_output(output_file)
pio.close()
# Save time & command line & results
PISM.util.writeProvenance(output_file)
vel_sia.write(output_file)
|
talbrecht/pism_pik
|
examples/python/sia_forward.py
|
Python
|
gpl-3.0
| 3,653
|
[
"NetCDF"
] |
caa60b888f506ca7342401bad1cfa40e603d48409e6725252be6f1c6700bc15e
|
"""
Test course update
"""
from uuid import uuid4
from regression.pages.studio.login_studio import StudioLogin
from regression.pages.studio.course_info_studio import (
CourseUpdatesPageExtended
)
from regression.tests.studio.studio_base_test import StudioBaseTestClass
from regression.tests.helpers import LoginHelper, get_course_info
class CourseUpdateTest(StudioBaseTestClass):
"""
Test course update.
"""
def setUp(self):
super(CourseUpdateTest, self).setUp()
self.login_page = StudioLogin(self.browser)
LoginHelper.login(self.login_page)
self.course_info = get_course_info()
self.course_update_page = CourseUpdatesPageExtended(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_update_page.visit()
self.course_update_content_selector = '#course-update-list li' \
' .post-preview .update-contents'
self.course_update_text = 'New update:{}'.format(uuid4().hex)
def create_course_update(self):
"""
Create course update and verify
"""
self.course_update_page.open_new_update_form()
self.course_update_page.write_update_and_save(self.course_update_text)
# Assert course update has been created successfully.
self.assertEqual(
self.course_update_page.q(
css=self.course_update_content_selector)[0].text,
self.course_update_text
)
def test_create_course_update(self):
"""
Verifies creation of course update
"""
# Create course update
self.create_course_update()
def test_edit_course_update(self):
"""
Verify editing course update
"""
# Create course update
self.create_course_update()
# Edit course update
course_update_edit_text = 'Edited update:{}'.format(uuid4().hex)
# Edit the course update and save.
self.course_update_page.edit_course_update(course_update_edit_text)
# Verify that the edit has been saved correctly and is visible.
self.assertEqual(
self.course_update_page.q(
css=self.course_update_content_selector)[0].text,
course_update_edit_text
)
def test_delete_course_update(self):
"""
Verify deletion of course update
"""
# Create course update
self.create_course_update()
# Delete course update
self.course_update_page.delete_course_update()
# If there are no course updates present anymore
# then we assume that deletion was successful.
# If present then make sure the contents don't match.
if self.course_update_page.q(
css=self.course_update_content_selector).present:
self.assertNotEqual(
self.course_update_page.q(
css=self.course_update_content_selector)[0].text,
self.course_update_text
)
class CourseHandoutTest(StudioBaseTestClass):
"""
Test course handout
"""
def setUp(self):
super(CourseHandoutTest, self).setUp()
self.login_page = StudioLogin(self.browser)
LoginHelper.login(self.login_page)
self.course_info = get_course_info()
self.course_update_page = CourseUpdatesPageExtended(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_update_page.visit()
def test_edit_course_handout(self):
"""
Verifies that user can edit course handout
"""
course_handout_content = 'New handout content:{}'.format(uuid4().hex)
# Edit course handout
self.course_update_page.edit_course_handout(course_handout_content)
# Verify that the edit has been saved correctly and is visible.
self.assertEqual(
self.course_update_page.q(css='.handouts-content')[0].text,
course_handout_content
)
# Discard the update.
self.course_update_page.edit_course_handout("")
# Verify that the edit has been saved correctly and is visible.
self.assertEqual(
self.course_update_page.q(css='.handouts-content')[0].text,
""
)
|
raeeschachar/edx-e2e-mirror
|
regression/tests/studio/test_course_update.py
|
Python
|
agpl-3.0
| 4,480
|
[
"VisIt"
] |
7094656145490e54cd1b86ff63239a6bf914b757e62466c00e9b23b3e7ed08bb
|
from __future__ import unicode_literals
import base64
import datetime
import json
import logging
# For Python 3.0 and later
from urllib.request import Request, urlopen
from django.db.models import Max
from django.utils import safestring
import numpy as np
import re
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from pyMSpec.pyisocalc import pyisocalc
# Create your models here.
class Adduct(models.Model):
nM = models.IntegerField(default=1)
delta_formula = models.TextField(default="") # addition/loss groups per M (sum of +X-Y)
delta_atoms = models.TextField(default="") # net atom addition/loss
charge = models.IntegerField(default=1) # overall charge after atom change
def charge_str(self):
if np.sign(self.charge) == 1:
c_sign = "+"
else:
c_sign = "-"
n_charges = np.abs(self.charge)
if n_charges == 1:
n_charges=""
return "{}{}".format(n_charges,c_sign)
def nice_str(self):
_nM = self.nM
_charge = self.charge
if _nM == 1:
_nM = ""
if np.abs(_charge) == 1:
_charge = self.charge_str()
return "[{}M{}]{}".format(_nM, self.delta_formula, _charge)
def html_str(self):
return safestring.mark_safe("[{}M{}]<sup>{}</sup>".format(self.nM, self.delta_formula, self.charge_str()).replace("1", ""))
def __unicode__(self):
#!! don't edit this - I'm an idiot so it's used as a key in Molecule!!#
return "[{}M{}]{}".format(self.nM, self.delta_formula, self.charge)
def __str__(self):
return self.html_str()
def get_delta_atoms(self):
def addElement(elDict, element, number):
elDict.setdefault(element, []).append(number)
self.delta_formula = self.delta_formula.strip()
        # prepend an implicit '+' when the formula has no leading sign
        if not (self.delta_formula.startswith("+") or self.delta_formula.startswith("-")):
self.delta_formula = "+" + self.delta_formula
formula_split = re.split(u'([+-])', self.delta_formula)
el_dict = {}
for sign, el in zip(formula_split[1::2], formula_split[2::2]):
this_el_dict = dict([(segment.element().name(), int("{}1".format(sign)) * segment.amount()) for segment in
pyisocalc.parseSumFormula(el).get_segments()])
for this_el in this_el_dict:
addElement(el_dict, this_el, this_el_dict[this_el])
sign_dict = {1: "+", -1: "-"}
for this_el in el_dict:
el_dict[this_el] = sum(el_dict[this_el])
el_string = "".join(["{}{}{}".format(sign_dict[np.sign(el_dict[el])], el, abs(el_dict[el])) for el in el_dict if
el_dict[el] != 0])
return el_string
def save(self, *args, **kwargs):
self.delta_atoms = self.get_delta_atoms()
super(Adduct, self).save(*args, **kwargs)
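# Illustrative usage sketch (not part of the original source), assuming pyisocalc
# parses "H" as a single hydrogen segment with amount 1.
def _example_adduct_usage():
# A protonated adduct [M+H]+: one copy of M gaining a single proton.
proton = Adduct(nM=1, delta_formula="+H", charge=1)
# nice_str() renders the adduct label, e.g. "[M+H]+"; save() fills delta_atoms
# via get_delta_atoms(), which for "+H" would yield "+H1".
return proton.nice_str()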
class Molecule(models.Model):
_adduct_mzs = models.TextField(default="")
name = models.TextField(default="")
sum_formula = models.TextField(null=True)
inchi_code = models.TextField(default="")
exact_mass = models.FloatField(default=0.0)
solubility = models.TextField(null=True, blank=True)
# External reference numbers
hmdb_id = models.TextField(null=True, blank=True)
chebi_id = models.TextField(null=True, blank=True)
lipidmaps_id = models.TextField(null=True, blank=True)
cas_id = models.TextField(null=True, blank=True)
pubchem_id = models.TextField(null=True, blank=True)
tags = models.ManyToManyField('MoleculeTag', blank=True)
natural_product = models.BooleanField(default=True)
def get_adduct_mzs(self):
return json.loads(self._adduct_mzs)
def set_adduct_mzs(self):
adduct_dict = {}
for adduct in Adduct.objects.all():
adduct_dict[str(adduct.__unicode__())] = self.get_mz(adduct)
self._adduct_mzs = json.dumps(adduct_dict)
def get_adduct_mzs_by_pk(self):
by_str = self.get_adduct_mzs()
return {adduct.pk: by_str.get(str(adduct.__unicode__()), np.nan) for adduct in Adduct.objects.all()}
adduct_mzs = property(get_adduct_mzs, set_adduct_mzs)
adduct_mzs_by_pk = property(get_adduct_mzs_by_pk)
def get_mass(self):
logging.info(self.sum_formula)
logging.info(pyisocalc.parseSumFormula(self.sum_formula))
spec = pyisocalc.perfect_pattern(pyisocalc.parseSumFormula(self.sum_formula), charge=0)
logging.info(spec)
mass = spec.get_spectrum(source='centroids')[0][np.argmax(spec.get_spectrum(source='centroids')[1])]
logging.info(mass)
return mass
def __unicode__(self):
return u"".join([i for i in self.name if ord(i) < 128])
def __str__(self):
return self.__unicode__()
def html_str(self):
return safestring.mark_safe("{}".format(self.name))
def save(self, *args, **kwargs):
logging.info('starting save')
self.sum_formula = self.sum_formula.strip()
logging.info('starting sf')
self.exact_mass = self.get_mass()
logging.info('starting adduct')
self.set_adduct_mzs()
logging.info('ready to save')
super(Molecule, self).save(*args, **kwargs)
def make_ion_formula(self, adduct):
formula = "({}){}{}".format(self.sum_formula, adduct.nM, adduct.delta_atoms)
return formula
def get_mz(self, adduct):
"""
Calculate the precursor mass for this molecule with a given adduct
:param adduct: object of class Adduct
:return: float
"""
try:
formula = self.make_ion_formula(adduct)
spec = pyisocalc.perfect_pattern(pyisocalc.parseSumFormula(formula), charge=adduct.charge)
mass = spec.get_spectrum(source='centroids')[0][np.argmax(spec.get_spectrum(source='centroids')[1])]
return mass
except Exception:
logging.debug("Could not calculate m/z for %s with adduct %s", self.name, adduct)
return -1.
def spectra_count(self):
return FragmentationSpectrum.objects.all().filter(standard__molecule=self).count()
@property
def smiles(self):
import pybel
try:
return pybel.readstring(b'inchi', self.inchi_code.encode('ascii')).write(b'smi').strip()
except IOError:
logging.error('Could not read InChI code: {}'.format(self.inchi_code))
return '??'
class Standard(models.Model):
inventory_id = models.IntegerField(db_column='MCFID', unique=True)
molecule = models.ForeignKey(Molecule)
vendor = models.TextField(null=True, blank=True)
vendor_cat = models.TextField(null=True, blank=True)
lot_num = models.TextField(null=True, blank=True)
location = models.TextField(null=True, blank=True)
purchase_date = models.DateField(null=True, blank=True)
def save(self, *args, **kwargs):
if self.inventory_id is None:
standards = Standard.objects.all()
max_ = standards.aggregate(Max('inventory_id'))['inventory_id__max']
if max_ is None: # if there are no standards
max_ = 0
self.inventory_id = max_ + 1
super(Standard, self).save(*args, **kwargs)
def __unicode__(self):
return "{}: {}".format(self.inventory_id, self.molecule.name)
def __str__(self):
return self.__unicode__()
class LcInfo(models.Model):
content = models.TextField()
def __unicode__(self):
return self.content
def __str__(self):
return self.__unicode__()
class MsInfo(models.Model):
content = models.TextField()
def __unicode__(self):
return self.content
def __str__(self):
return self.__unicode__()
class InstrumentInfo(models.Model):
content = models.TextField()
def __unicode__(self):
return self.content
def __str__(self):
return self.__unicode__()
class Dataset(models.Model):
processing_finished = models.BooleanField(default=False)
name = models.TextField(default="")
path = models.TextField(default="")
adducts_present = models.ManyToManyField(Adduct, blank=True)
standards_present = models.ManyToManyField(Standard, blank=True)
mass_accuracy_ppm = models.FloatField(default=10.0)
quad_window_mz = models.FloatField(default=1.0)
lc_info = models.ManyToManyField(to=LcInfo)
ms_info = models.ManyToManyField(to=MsInfo)
instrument_info = models.ManyToManyField(to=InstrumentInfo)
ionization_method = models.TextField(default="")
ion_analyzer = models.TextField(default="")
date_added = models.DateTimeField(auto_now_add=True, blank=True)
date_modified = models.DateTimeField(auto_now=True, blank=True)
# (for xic search)
def __unicode__(self):
return self.name
def __str__(self):
return self.__unicode__()
class Xic(models.Model):
mz = models.FloatField(default=0.0)
dataset = models.ForeignKey(Dataset)
_xic = models.TextField(
db_column='data',
blank=True)
_rt = models.TextField(
db_column='rt_data',
blank=True)
standard = models.ForeignKey(Standard, blank=True, null=True)
adduct = models.ForeignKey(Adduct, blank=True, null=True)
collision = models.TextField(default='')
def set_xic(self, xic):
xic = np.asarray(xic, dtype=np.float64)
self._xic = base64.b64encode(xic)
def set_rt(self, rt):
rt = np.asarray(rt, dtype=np.float64)
self._rt = base64.b64encode(rt)
def get_xic(self):
r = base64.b64decode(self._xic)
return np.frombuffer(r, dtype=np.float64)
def get_rt(self):
r = base64.b64decode(self._rt)
return np.frombuffer(r, dtype=np.float64)
xic = property(get_xic, set_xic)
rt = property(get_rt, set_rt)
def check_mass(self, tol_ppm=100):
tol_mz = self.mz * tol_ppm * 1e-6
theor_mz = self.standard.molecule.get_mz(self.adduct)
if np.abs(theor_mz - self.mz) > tol_mz:
raise ValueError('Mass tolerance not satisfied {} {}'.format(theor_mz, self.mz))
return True
# todo(An)
# extend save to check that standard+adduct mass == precursor
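# Illustrative sketch for the todo above (not part of the original source): validate
# the precursor mass against the standard+adduct theoretical m/z before saving.
def _example_save_with_mass_check(xic):
# check_mass() raises ValueError when |theoretical - observed| exceeds tol_ppm.
if xic.standard is not None and xic.adduct is not None:
xic.check_mass(tol_ppm=100)
xic.save()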
class FragmentationSpectrum(models.Model):
precursor_mz = models.FloatField(null=True)
ms1_intensity = models.FloatField(default=0.0)
_centroid_mzs = models.TextField()
_centroid_ints = models.TextField()
collision_energy = models.TextField(default="")
dataset = models.ForeignKey(Dataset)
standard = models.ForeignKey(Standard, blank=True, null=True)
adduct = models.ForeignKey(Adduct, blank=True, null=True)
spec_num = models.IntegerField(blank=True, null=True)
rt = models.FloatField(blank=True, null=True)
precursor_quad_fraction = models.FloatField(blank=True, null=True)
reviewed = models.BooleanField(default=False)
date_added = models.DateField(default=timezone.now)
date_edited = models.DateField(default=timezone.now)
last_editor = models.ForeignKey(User, blank=True, null=True)
def __unicode__(self):
return "{} {:3.2f}".format(self.spec_num, self.precursor_mz)
def __str__(self):
return self.__unicode__()
def set_centroid_mzs(self, mzs):
mzs = np.asarray(mzs, dtype=np.float64)
self._centroid_mzs = base64.b64encode(mzs)
def get_centroid_mzs(self):
r = base64.b64decode(self._centroid_mzs)
return np.frombuffer(r, dtype=np.float64)
centroid_mzs = property(get_centroid_mzs, set_centroid_mzs)
def set_centroid_ints(self, values):
values = np.asarray(values, dtype=np.float64)
self._centroid_ints = base64.b64encode(values)
def get_centroid_ints(self):
r = base64.b64decode(self._centroid_ints)
return np.frombuffer(r, dtype=np.float64)
centroid_ints = property(get_centroid_ints, set_centroid_ints)
def get_centroids(self):
return self.centroid_mzs, self.centroid_ints
def save(self, *args, **kwargs):
if not self.pk:
self.date_added = datetime.datetime.now()
self.date_edited = datetime.datetime.now()
super(FragmentationSpectrum, self).save(*args, **kwargs)
@property
def base_peak(self):
spec = self.get_centroids()
return spec[0][np.argmax(spec[1])]
@property
def splash(self):
splash_payload = json.dumps({
"ions": [{"mass": mz, "intensity": int_} for mz, int_ in zip(self.centroid_mzs, self.centroid_ints)],
"type": "MS"})
url = "http://splash.fiehnlab.ucdavis.edu/splash/it"
request = Request(url, data=splash_payload.encode('utf-8'), headers={'Content-Type': "application/json"})
response = urlopen(request).read().decode()
return response
@property
def massbank_accession(self): # return a six digit number
return "{:06.0f}".format(self.id % 999999) #horrible hack
class MoleculeSpectraCount(models.Model):
molecule = models.ForeignKey(Molecule, primary_key=True, on_delete=models.DO_NOTHING)
spectra_count = models.IntegerField()
class Meta:
managed = False
class ProcessingError(models.Model):
dataset = models.ForeignKey(Dataset)
message = models.TextField()
class MoleculeTag(models.Model):
name = models.TextField()
def __unicode__(self):
return self.name
def __str__(self):
return self.__unicode__()
|
alexandrovteam/curatr
|
mcf_standard_browser/standards_review/models.py
|
Python
|
apache-2.0
| 13,487
|
[
"Pybel"
] |
352d569232469b290857636513dfe77b56813130635a216906ce31a615483e0e
|
# -*- coding: utf-8 -*-
import fedmsg.tests.test_meta
import arrow
class TestPagureConglomeratorByIssueAndPR(
fedmsg.tests.test_meta.ConglomerateBase):
expected = [
{
'categories': set(['pagure']),
'end_time': 1458308863.0,
'human_time': arrow.get(1458307676).humanize(),
'icon': 'https://apps.fedoraproject.org/packages/images/icons/package_128x128.png',
'link': 'https://pagure.io/pungi/issue/231',
'packages': set([]),
'secondary_icon': 'https://seccdn.libravatar.org/avatar/a89b57d99dcf12d40ec2b9fb05910b90293b13b0b87415208bedc897bc18a354?s=64&d=retro',
'start_time': 1458306489.0,
'subjective': 'ausil and lsedlar interacted with issue #231 of project "pungi" 2 times',
'subtitle': 'ausil and lsedlar interacted with issue #231 of project "pungi" 2 times',
'timestamp': 1458307676.0,
'topics': set(['io.pagure.prod.pagure.issue.comment.added']),
'usernames': set(['ausil', 'lsedlar'])
}, {
'categories': set(['pagure']),
'end_time': 1458307490.0,
'human_time': arrow.get(1458307490).humanize(),
'icon': 'https://apps.fedoraproject.org/packages/images/icons/package_128x128.png',
'link': 'https://pagure.io/fork/bonnegent/pagure',
'packages': set([]),
'secondary_icon': 'https://seccdn.libravatar.org/avatar/1216fff466c9dbb6ce85ac95bf8f45b9e19421af97de67945852722b899a34ee?s=64&d=retro',
'start_time': 1458307490.0,
'subjective': u'bonnegent forked pagure to fork/bonnegent/pagure',
'subtitle': u'bonnegent forked pagure to fork/bonnegent/pagure',
'timestamp': 1458307490.0,
'topics': set(['io.pagure.prod.pagure.project.forked']),
'usernames': set(['bonnegent'])
}, {
'categories': set(['pagure']),
'end_time': 1458307394.0,
'human_time': arrow.get(1458307394).humanize(),
'icon': 'https://apps.fedoraproject.org/packages/images/icons/package_128x128.png',
'link': 'https://pagure.io/pungi-fedora/pull-request/19',
'packages': set([]),
'secondary_icon': 'https://seccdn.libravatar.org/avatar/0929fed032bd0a481ef74c46023fefe443f3d1b72dbe3efd293b25ed4fc843fd?s=64&d=retro',
'start_time': 1458307394.0,
'subjective': 'sgallagh interacted with pull-request #19 of project "pungi-fedora" 2 times',
'subtitle': 'sgallagh interacted with pull-request #19 of project "pungi-fedora" 2 times',
'timestamp': 1458307394.0,
'topics': set(['io.pagure.prod.pagure.pull-request.comment.added',
'io.pagure.prod.pagure.pull-request.new']),
'usernames': set(['sgallagh'])
}, {
'categories': set(['pagure']),
'end_time': 1458307333.0,
'human_time': arrow.get(1458307333).humanize(),
'icon': 'https://apps.fedoraproject.org/packages/images/icons/package_128x128.png',
'link': 'https://pagure.io/pungi-fedora/pull-request/18',
'packages': set([]),
'secondary_icon': 'https://seccdn.libravatar.org/avatar/0929fed032bd0a481ef74c46023fefe443f3d1b72dbe3efd293b25ed4fc843fd?s=64&d=retro',
'start_time': 1458307333.0,
'subjective': 'sgallagh interacted with pull-request #18 of project "pungi-fedora" 2 times',
'subtitle': 'sgallagh interacted with pull-request #18 of project "pungi-fedora" 2 times',
'timestamp': 1458307333.0,
'topics': set(['io.pagure.prod.pagure.pull-request.comment.added',
'io.pagure.prod.pagure.pull-request.new']),
'usernames': set(['sgallagh'])
}, {
'categories': set(['pagure']),
'end_time': 1458306395.0,
'human_time': arrow.get(1458306374.5).humanize(),
'icon': 'https://apps.fedoraproject.org/packages/images/icons/package_128x128.png',
'link': 'https://pagure.io/pungi/pull-request/235',
'packages': set([]),
'secondary_icon': 'https://seccdn.libravatar.org/avatar/e11f439e57cde0130fda04ad14b4f24376d56f6b0daae3e8f41fda1a05600651?s=64&d=retro',
'start_time': 1458306354.0,
'subjective': 'lsedlar interacted with pull-request #235 of project "pungi" 2 times',
'subtitle': 'lsedlar interacted with pull-request #235 of project "pungi" 2 times',
'timestamp': 1458306374.5,
'topics': set(['io.pagure.prod.pagure.pull-request.flag.added',
'io.pagure.prod.pagure.pull-request.new']),
'usernames': set(['lsedlar'])
}, {
'categories': set(['pagure']),
'end_time': 1458306074.0,
'human_time': arrow.get(1458305616.4).humanize(),
'icon': 'https://apps.fedoraproject.org/packages/images/icons/package_128x128.png',
'link': 'https://pagure.io/pungi/pull-request/234',
'packages': set([]),
'secondary_icon': 'https://seccdn.libravatar.org/avatar/e11f439e57cde0130fda04ad14b4f24376d56f6b0daae3e8f41fda1a05600651?s=64&d=retro',
'start_time': 1458304911.0,
'subjective': 'lsedlar interacted with pull-request #234 of project "pungi" 5 times',
'subtitle': 'lsedlar interacted with pull-request #234 of project "pungi" 5 times',
'timestamp': 1458305616.4,
'topics': set(['io.pagure.prod.pagure.pull-request.closed',
'io.pagure.prod.pagure.pull-request.comment.added',
'io.pagure.prod.pagure.pull-request.flag.added',
'io.pagure.prod.pagure.pull-request.new']),
'usernames': set(['lsedlar'])
}, {
'categories': set(['pagure']),
'end_time': 1458305858.0,
'human_time': arrow.get(1458305858.0).humanize(),
'icon': 'https://apps.fedoraproject.org/packages/images/icons/package_128x128.png',
'link': 'https://pagure.io/pagure/issue/849',
'packages': set([]),
'secondary_icon': 'https://seccdn.libravatar.org/avatar/1216fff466c9dbb6ce85ac95bf8f45b9e19421af97de67945852722b899a34ee?s=64&d=retro',
'start_time': 1458305858.0,
'subjective': u'bonnegent opened a new ticket pagure#849: "pagure on python3"',
'subtitle': u'bonnegent opened a new ticket pagure#849: "pagure on python3"',
'timestamp': 1458305858.0,
'topics': set(['io.pagure.prod.pagure.issue.new']),
'usernames': set(['bonnegent'])
}, {
'categories': set(['pagure']),
'end_time': 1458303598.0,
'human_time': arrow.get(1458299536.142857).humanize(),
'icon': 'https://apps.fedoraproject.org/packages/images/icons/package_128x128.png',
'link': 'https://pagure.io/pagure/pull-request/843',
'packages': set([]),
'secondary_icon': 'https://seccdn.libravatar.org/avatar/ad9e5c1cfd5d5180a6b9a8ebdc5fc91fbd899dd4d2fe780b4f9963598216d7f8?s=64&d=retro',
'start_time': 1458298555.0,
'subjective': 'aavrug and pingou interacted with pull-request #843 of project "pagure" 7 times',
'subtitle': 'aavrug and pingou interacted with pull-request #843 of project "pagure" 7 times',
'timestamp': 1458299536.142857,
'topics': set(['io.pagure.prod.pagure.pull-request.comment.added']),
'usernames': set(['aavrug', 'pingou']),
}, {
'categories': set(['pagure']),
'end_time': 1458298137.0,
'human_time': arrow.get(1458298005.5).humanize(),
'icon': 'https://apps.fedoraproject.org/packages/images/icons/package_128x128.png',
'link': 'https://pagure.io/pagure/issue/833',
'packages': set([]),
'secondary_icon': 'https://seccdn.libravatar.org/avatar/01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c?s=64&d=retro',
'start_time': 1458297874.0,
'subjective': 'pingou interacted with issue #833 of project "pagure" 2 times',
'subtitle': 'pingou interacted with issue #833 of project "pagure" 2 times',
'timestamp': 1458298005.5,
'topics': set(['io.pagure.prod.pagure.issue.comment.added']),
'usernames': set(['pingou']),
}, {
'categories': set(['pagure']),
'end_time': 1458297187.0,
'human_time': arrow.get(1458297187.0).humanize(),
'icon': 'https://apps.fedoraproject.org/packages/images/icons/package_128x128.png',
'link': 'https://pagure.io/pagure/pull-request/848#comment-3484',
'packages': set([]),
'secondary_icon': 'https://seccdn.libravatar.org/avatar/01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c?s=64&d=retro',
'start_time': 1458297187.0,
'subjective': u'pingou commented on PR #848 on pagure',
'subtitle': u'pingou commented on PR #848 on pagure',
'timestamp': 1458297187.0,
'topics': set(['io.pagure.prod.pagure.pull-request.comment.added']),
'usernames': set(['pingou'])}
]
originals = [
{
"i": 2,
"msg": {
"agent": "ausil",
"issue": {
"assignee": None,
"blocks": [],
"comments": [
{
"comment": "looking at the lorax code find_templates only exists in the f24-branch and master. ",
"date_created": "1458167951",
"edited_on": None,
"editor": None,
"id": 2021,
"parent": None,
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
{
"comment": "pungi is actually doing terrible things here. it is making assumptions on the compose box where the templates will be in the runroot environment. we are going to have to run something in the chroot that will tell us or we need to move to making the DVD as part of the process that makes the install tree. which is what pungi cli does",
"date_created": "1458228506",
"edited_on": None,
"editor": None,
"id": 2030,
"parent": None,
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
{
"comment": "In my opinion th best solution would be to add a configuration option to specify the path so that there is no guessing. See #235. For lack of better name I called it `iso_boot_option_map`, which is not really descriptive, but I don't know exactly what this path means.\r\n\r\nAnother solution would be to modify the command in `runroot` task to call pylorax, find the correct path and substitute that into proper place (with some fallback if pylorax is not available). This is not easy to implement as I can't bypass some of the quoting.\r\n\r\nTechnically, we could hack it and run a separate `runroot` task to find the directory ([example](http://koji.stg.fedoraproject.org/koji/taskinfo?taskID=90043678)), put it into the command on the compose box and then continue as usual. The code is not that difficult (#234), but it is not really a solution.",
"date_created": "1458306489",
"edited_on": None,
"editor": None,
"id": 2037,
"parent": None,
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
},
{
"comment": "I think if we write a command in pungi that makes the dvd iso and is able to figure it out is best. we then change the runroot call to install pungi, execute the command to make the dvd. its all then nicely contained. and we do not make any assumptions on the runroot environment ",
"date_created": "1458308858",
"edited_on": None,
"editor": None,
"id": 2038,
"parent": None,
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
}
],
"content": "Related to PR #230 we need to be able to dynamically work out where the templates are located depending on the version of lorax so we don't need to hard code locations. ",
"date_created": "1458167510",
"depends": [],
"id": 231,
"private": False,
"status": "Open",
"tags": [],
"title": "Use pylorax to locate templates",
"user": {
"fullname": "Peter Robinson",
"name": "pbrobinson"
}
},
"project": {
"date_created": "1431529680",
"description": "Distribution compose tool",
"id": 8,
"name": "pungi",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng compose distribution"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
}
},
"msg_id": "2016-f6837c28-7ae7-4417-8cdb-8b1017b114c3",
"timestamp": 1458308863.0,
"topic": "io.pagure.prod.pagure.issue.comment.added"
},
{
"i": 2,
"msg": {
"agent": "bonnegent",
"project": {
"date_created": "1458307490",
"description": "A git centered forge",
"id": 429,
"name": "pagure",
"parent": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": False,
"project_documentation": True,
"pull_requests": False
},
"tags": [],
"user": {
"fullname": "Sébastien Bonnegent",
"name": "bonnegent"
}
}
},
"msg_id": "2016-8801e2f4-42f1-45d5-8272-6457a3d85c4d",
"timestamp": 1458307490.0,
"topic": "io.pagure.prod.pagure.project.forked"
},
{
"i": 2,
"msg": {
"agent": "sgallagh",
"pullrequest": {
"assignee": None,
"branch": "f24",
"branch_from": "f24",
"closed_at": None,
"closed_by": None,
"comments": [],
"commit_start": None,
"commit_stop": None,
"date_created": "1458307394",
"id": 19,
"project": {
"date_created": "1432928381",
"description": "fedora config files for pungi",
"id": 36,
"name": "pungi-fedora",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1449595462",
"description": "fedora config files for pungi",
"id": 284,
"name": "pungi-fedora",
"parent": {
"date_created": "1432928381",
"description": "fedora config files for pungi",
"id": 36,
"name": "pungi-fedora",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [],
"user": {
"fullname": "Stephen Gallagher",
"name": "sgallagh"
}
},
"status": "Open",
"title": "(F24) Server: Add \"Fedora Custom Operating System\" Environment",
"uid": "8c6252b757ca4ff2a0f23cbc41e32ba7",
"updated_on": "1458307394",
"user": {
"fullname": "Stephen Gallagher",
"name": "sgallagh"
}
}
},
"msg_id": "2016-7e1ee254-ad19-4d8f-8568-867646aad2a0",
"timestamp": 1458307394.0,
"topic": "io.pagure.prod.pagure.pull-request.new"
},
{
"i": 3,
"msg": {
"agent": "sgallagh",
"pullrequest": {
"assignee": None,
"branch": "f24",
"branch_from": "f24",
"closed_at": None,
"closed_by": None,
"comments": [],
"commit_start": None,
"commit_stop": None,
"date_created": "1458307394",
"id": 19,
"project": {
"date_created": "1432928381",
"description": "fedora config files for pungi",
"id": 36,
"name": "pungi-fedora",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1449595462",
"description": "fedora config files for pungi",
"id": 284,
"name": "pungi-fedora",
"parent": {
"date_created": "1432928381",
"description": "fedora config files for pungi",
"id": 36,
"name": "pungi-fedora",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [],
"user": {
"fullname": "Stephen Gallagher",
"name": "sgallagh"
}
},
"status": "Open",
"title": "(F24) Server: Add \"Fedora Custom Operating System\" Environment",
"uid": "8c6252b757ca4ff2a0f23cbc41e32ba7",
"updated_on": "1458307394",
"user": {
"fullname": "Stephen Gallagher",
"name": "sgallagh"
}
}
},
"msg_id": "2016-0d5aa115-1f78-45d4-bde1-a3b73ac0fd2c",
"timestamp": 1458307394.0,
"topic": "io.pagure.prod.pagure.pull-request.comment.added"
},
{
"i": 2,
"msg": {
"agent": "sgallagh",
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "master",
"closed_at": None,
"closed_by": None,
"comments": [],
"commit_start": None,
"commit_stop": None,
"date_created": "1458307332",
"id": 18,
"project": {
"date_created": "1432928381",
"description": "fedora config files for pungi",
"id": 36,
"name": "pungi-fedora",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1449595462",
"description": "fedora config files for pungi",
"id": 284,
"name": "pungi-fedora",
"parent": {
"date_created": "1432928381",
"description": "fedora config files for pungi",
"id": 36,
"name": "pungi-fedora",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [],
"user": {
"fullname": "Stephen Gallagher",
"name": "sgallagh"
}
},
"status": "Open",
"title": "(F25/Rawhide) Server: Add \"Fedora Custom Operating System\" Environment",
"uid": "954179ff5f6a474aaaaccb5dc98116aa",
"updated_on": "1458307332",
"user": {
"fullname": "Stephen Gallagher",
"name": "sgallagh"
}
}
},
"msg_id": "2016-b58f6e52-dbc8-4082-8f2a-4d8aa6b4f988",
"timestamp": 1458307333.0,
"topic": "io.pagure.prod.pagure.pull-request.new"
},
{
"i": 3,
"msg": {
"agent": "sgallagh",
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "master",
"closed_at": None,
"closed_by": None,
"comments": [],
"commit_start": None,
"commit_stop": None,
"date_created": "1458307332",
"id": 18,
"project": {
"date_created": "1432928381",
"description": "fedora config files for pungi",
"id": 36,
"name": "pungi-fedora",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1449595462",
"description": "fedora config files for pungi",
"id": 284,
"name": "pungi-fedora",
"parent": {
"date_created": "1432928381",
"description": "fedora config files for pungi",
"id": 36,
"name": "pungi-fedora",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [],
"user": {
"fullname": "Stephen Gallagher",
"name": "sgallagh"
}
},
"status": "Open",
"title": "(F25/Rawhide) Server: Add \"Fedora Custom Operating System\" Environment",
"uid": "954179ff5f6a474aaaaccb5dc98116aa",
"updated_on": "1458307332",
"user": {
"fullname": "Stephen Gallagher",
"name": "sgallagh"
}
}
},
"msg_id": "2016-6bf8d983-75d0-484f-88ee-20575f1195d0",
"timestamp": 1458307333.0,
"topic": "io.pagure.prod.pagure.pull-request.comment.added"
},
{
"i": 2,
"msg": {
"agent": "lsedlar",
"issue": {
"assignee": None,
"blocks": [],
"comments": [
{
"comment": "looking at the lorax code find_templates only exists in the f24-branch and master. ",
"date_created": "1458167951",
"edited_on": None,
"editor": None,
"id": 2021,
"parent": None,
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
{
"comment": "pungi is actually doing terrible things here. it is making assumptions on the compose box where the templates will be in the runroot environment. we are going to have to run something in the chroot that will tell us or we need to move to making the DVD as part of the process that makes the install tree. which is what pungi cli does",
"date_created": "1458228506",
"edited_on": None,
"editor": None,
"id": 2030,
"parent": None,
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
{
"comment": "In my opinion th best solution would be to add a configuration option to specify the path so that there is no guessing. See #235. For lack of better name I called it `iso_boot_option_map`, which is not really descriptive, but I don't know exactly what this path means.\r\n\r\nAnother solution would be to modify the command in `runroot` task to call pylorax, find the correct path and substitute that into proper place (with some fallback if pylorax is not available). This is not easy to implement as I can't bypass some of the quoting.\r\n\r\nTechnically, we could hack it and run a separate `runroot` task to find the directory ([example](http://koji.stg.fedoraproject.org/koji/taskinfo?taskID=90043678)), put it into the command on the compose box and then continue as usual. The code is not that difficult (#234), but it is not really a solution.",
"date_created": "1458306489",
"edited_on": None,
"editor": None,
"id": 2037,
"parent": None,
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
}
],
"content": "Related to PR #230 we need to be able to dynamically work out where the templates are located depending on the version of lorax so we don't need to hard code locations. ",
"date_created": "1458167510",
"depends": [],
"id": 231,
"private": False,
"status": "Open",
"tags": [],
"title": "Use pylorax to locate templates",
"user": {
"fullname": "Peter Robinson",
"name": "pbrobinson"
}
},
"project": {
"date_created": "1431529680",
"description": "Distribution compose tool",
"id": 8,
"name": "pungi",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng compose distribution"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
}
},
"msg_id": "2016-f887623c-481b-450c-8b1e-5445a85f5c53",
"timestamp": 1458306489.0,
"topic": "io.pagure.prod.pagure.issue.comment.added"
},
{
"i": 1,
"msg": {
"agent": "lsedlar",
"flag": {
"comment": "Build successful",
"date_created": "1458306394",
"percent": "100",
"pull_request_uid": "74df685a56804088aa9728684ba588bd",
"uid": "32db8e8ab4fe43f3b217a7662c20c790",
"url": "http://jenkins.fedorainfracloud.org/job/pungi/149/",
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
},
"username": "Jenkins"
},
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "lorax-dir",
"closed_at": None,
"closed_by": None,
"comments": [],
"commit_start": "6a8eed085c3693ad3d88b063dce363a0ce82d49d",
"commit_stop": "6a8eed085c3693ad3d88b063dce363a0ce82d49d",
"date_created": "1458306353",
"id": 235,
"project": {
"date_created": "1431529680",
"description": "Distribution compose tool",
"id": 8,
"name": "pungi",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng compose distribution"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1447057736",
"description": "Distribution compose tool",
"id": 244,
"name": "pungi",
"parent": {
"date_created": "1431529680",
"description": "Distribution compose tool",
"id": 8,
"name": "pungi",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng compose distribution"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": "http://46.101.221.203:8000/",
"always_merge": False,
"issue_tracker": False,
"project_documentation": True,
"pull_requests": False
},
"tags": [],
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
},
"status": "Open",
"title": "Add option for lorax template dir",
"uid": "74df685a56804088aa9728684ba588bd",
"updated_on": "1458306356",
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
}
},
"msg_id": "2016-f9b21316-6dad-4bdc-905b-c8ba6d9452a4",
"timestamp": 1458306395.0,
"topic": "io.pagure.prod.pagure.pull-request.flag.added"
},
{
"i": 1,
"msg": {
"agent": "lsedlar",
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "lorax-dir",
"closed_at": None,
"closed_by": None,
"comments": [],
"commit_start": None,
"commit_stop": None,
"date_created": "1458306353",
"id": 235,
"project": {
"date_created": "1431529680",
"description": "Distribution compose tool",
"id": 8,
"name": "pungi",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng compose distribution"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1447057736",
"description": "Distribution compose tool",
"id": 244,
"name": "pungi",
"parent": {
"date_created": "1431529680",
"description": "Distribution compose tool",
"id": 8,
"name": "pungi",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng compose distribution"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": "http://46.101.221.203:8000/",
"always_merge": False,
"issue_tracker": False,
"project_documentation": True,
"pull_requests": False
},
"tags": [],
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
},
"status": "Open",
"title": "Add option for lorax template dir",
"uid": "74df685a56804088aa9728684ba588bd",
"updated_on": "1458306353",
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
}
},
"msg_id": "2016-4b71901b-5b39-4096-9c01-2e98efac7a37",
"timestamp": 1458306354.0,
"topic": "io.pagure.prod.pagure.pull-request.new"
},
{
"i": 1,
"msg": {
"agent": "lsedlar",
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "lorax-find",
"closed_at": "1458306073",
"closed_by": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
},
"comments": [
{
"comment": "Please don't merge this!\r\n\r\nIt's a really ugly hack. I'm just posting it here as a proof-of-concept.",
"commit": None,
"date_created": "1458306068",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3493,
"line": None,
"notification": False,
"parent": None,
"tree": None,
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
}
],
"commit_start": "6e2d96d15d0b386c64658707eacae42007c56504",
"commit_stop": "6e2d96d15d0b386c64658707eacae42007c56504",
"date_created": "1458304909",
"id": 234,
"project": {
"date_created": "1431529680",
"description": "Distribution compose tool",
"id": 8,
"name": "pungi",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng compose distribution"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1447057736",
"description": "Distribution compose tool",
"id": 244,
"name": "pungi",
"parent": {
"date_created": "1431529680",
"description": "Distribution compose tool",
"id": 8,
"name": "pungi",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng compose distribution"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": "http://46.101.221.203:8000/",
"always_merge": False,
"issue_tracker": False,
"project_documentation": True,
"pull_requests": False
},
"tags": [],
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
},
"status": "Closed",
"title": "[createiso] Add hack to get template dir from lorax",
"uid": "08dcb7bf15574051ad1fef8af87cd3c7",
"updated_on": "1458306073",
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
}
},
"msg_id": "2016-4614f57c-b3c8-4ed3-ad8e-a4cefd716986",
"timestamp": 1458306074.0,
"topic": "io.pagure.prod.pagure.pull-request.comment.added"
},
{
"i": 2,
"msg": {
"agent": "lsedlar",
"merged": False,
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "lorax-find",
"closed_at": "1458306073",
"closed_by": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
},
"comments": [
{
"comment": "Please don't merge this!\r\n\r\nIt's a really ugly hack. I'm just posting it here as a proof-of-concept.",
"commit": None,
"date_created": "1458306068",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3493,
"line": None,
"notification": False,
"parent": None,
"tree": None,
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
}
],
"commit_start": "6e2d96d15d0b386c64658707eacae42007c56504",
"commit_stop": "6e2d96d15d0b386c64658707eacae42007c56504",
"date_created": "1458304909",
"id": 234,
"project": {
"date_created": "1431529680",
"description": "Distribution compose tool",
"id": 8,
"name": "pungi",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng compose distribution"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1447057736",
"description": "Distribution compose tool",
"id": 244,
"name": "pungi",
"parent": {
"date_created": "1431529680",
"description": "Distribution compose tool",
"id": 8,
"name": "pungi",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng compose distribution"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": "http://46.101.221.203:8000/",
"always_merge": False,
"issue_tracker": False,
"project_documentation": True,
"pull_requests": False
},
"tags": [],
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
},
"status": "Closed",
"title": "[createiso] Add hack to get template dir from lorax",
"uid": "08dcb7bf15574051ad1fef8af87cd3c7",
"updated_on": "1458306073",
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
}
},
"msg_id": "2016-07c4ccd5-e334-458f-a1bd-fdc1aebacbe6",
"timestamp": 1458306074.0,
"topic": "io.pagure.prod.pagure.pull-request.closed"
},
{
"i": 1,
"msg": {
"agent": "lsedlar",
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "lorax-find",
"closed_at": None,
"closed_by": None,
"comments": [
{
"comment": "Please don't merge this!\r\n\r\nIt's a really ugly hack. I'm just posting it here as a proof-of-concept.",
"commit": None,
"date_created": "1458306068",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3493,
"line": None,
"notification": False,
"parent": None,
"tree": None,
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
}
],
"commit_start": "6e2d96d15d0b386c64658707eacae42007c56504",
"commit_stop": "6e2d96d15d0b386c64658707eacae42007c56504",
"date_created": "1458304909",
"id": 234,
"project": {
"date_created": "1431529680",
"description": "Distribution compose tool",
"id": 8,
"name": "pungi",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng compose distribution"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1447057736",
"description": "Distribution compose tool",
"id": 244,
"name": "pungi",
"parent": {
"date_created": "1431529680",
"description": "Distribution compose tool",
"id": 8,
"name": "pungi",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng compose distribution"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": "http://46.101.221.203:8000/",
"always_merge": False,
"issue_tracker": False,
"project_documentation": True,
"pull_requests": False
},
"tags": [],
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
},
"status": "Open",
"title": "[createiso] Add hack to get template dir from lorax",
"uid": "08dcb7bf15574051ad1fef8af87cd3c7",
"updated_on": "1458304912",
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
}
},
"msg_id": "2016-32b8d742-0b52-477a-99aa-b74aaa8d27c3",
"timestamp": 1458306069.0,
"topic": "io.pagure.prod.pagure.pull-request.comment.added"
},
{
"i": 1,
"msg": {
"agent": "bonnegent",
"issue": {
"assignee": None,
"blocks": [],
"comments": [],
"content": "Hi,\r\nI want to use Pagure with Python 3. Is it in roadmap ?\r\n\r\nI saw a branch 'py3_work' and I have some knowledge with python2/3, maybe I can help ?\r\n\r\nS.Bonnegent",
"date_created": "1458305857",
"depends": [],
"id": 849,
"private": False,
"status": "Open",
"tags": [],
"title": "pagure on python3",
"user": {
"fullname": "Sébastien Bonnegent",
"name": "bonnegent"
}
},
"project": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
}
},
"msg_id": "2016-42fc29b6-da18-4c69-aa04-85e510531ad0",
"timestamp": 1458305858.0,
"topic": "io.pagure.prod.pagure.issue.new"
},
{
"i": 1,
"msg": {
"agent": "lsedlar",
"flag": {
"comment": "Build successful",
"date_created": "1458304953",
"percent": "100",
"pull_request_uid": "08dcb7bf15574051ad1fef8af87cd3c7",
"uid": "8881877e7d564cf0b9fc89964f709003",
"url": "http://jenkins.fedorainfracloud.org/job/pungi/148/",
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
},
"username": "Jenkins"
},
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "lorax-find",
"closed_at": None,
"closed_by": None,
"comments": [],
"commit_start": "6e2d96d15d0b386c64658707eacae42007c56504",
"commit_stop": "6e2d96d15d0b386c64658707eacae42007c56504",
"date_created": "1458304909",
"id": 234,
"project": {
"date_created": "1431529680",
"description": "Distribution compose tool",
"id": 8,
"name": "pungi",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng compose distribution"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1447057736",
"description": "Distribution compose tool",
"id": 244,
"name": "pungi",
"parent": {
"date_created": "1431529680",
"description": "Distribution compose tool",
"id": 8,
"name": "pungi",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng compose distribution"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": "http://46.101.221.203:8000/",
"always_merge": False,
"issue_tracker": False,
"project_documentation": True,
"pull_requests": False
},
"tags": [],
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
},
"status": "Open",
"title": "[createiso] Add hack to get template dir from lorax",
"uid": "08dcb7bf15574051ad1fef8af87cd3c7",
"updated_on": "1458304912",
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
}
},
"msg_id": "2016-a7320bfb-67ce-4287-845b-55c3fd01d7da",
"timestamp": 1458304954.0,
"topic": "io.pagure.prod.pagure.pull-request.flag.added"
},
{
"i": 1,
"msg": {
"agent": "lsedlar",
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "lorax-find",
"closed_at": None,
"closed_by": None,
"comments": [],
"commit_start": None,
"commit_stop": None,
"date_created": "1458304909",
"id": 234,
"project": {
"date_created": "1431529680",
"description": "Distribution compose tool",
"id": 8,
"name": "pungi",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng compose distribution"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1447057736",
"description": "Distribution compose tool",
"id": 244,
"name": "pungi",
"parent": {
"date_created": "1431529680",
"description": "Distribution compose tool",
"id": 8,
"name": "pungi",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": True,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"releng compose distribution"
],
"user": {
"fullname": "Dennis Gilmore",
"name": "ausil"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": "http://46.101.221.203:8000/",
"always_merge": False,
"issue_tracker": False,
"project_documentation": True,
"pull_requests": False
},
"tags": [],
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
},
"status": "Open",
"title": "[createiso] Add hack to get template dir from lorax",
"uid": "08dcb7bf15574051ad1fef8af87cd3c7",
"updated_on": "1458304909",
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
}
},
"msg_id": "2016-25c5e4f7-ab6b-4f40-903d-35553ea0c9ca",
"timestamp": 1458304911.0,
"topic": "io.pagure.prod.pagure.pull-request.new"
},
{
"i": 1,
"msg": {
"agent": "aavrug",
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "master",
"closed_at": None,
"closed_by": None,
"comments": [
{
"comment": "Missing indentation for the content of the form :)",
"commit": "08d5e7567a6b1495be7d1da47ae8f5f5e0d079af",
"date_created": "1458228021",
"edited_on": None,
"editor": None,
"filename": "pagure/templates/repo_master.html",
"id": 3452,
"line": 13,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458228047",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3453,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Instead of having these in the form w/ hidden fields, maybe we could place them in the URL as we do in other places",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228080",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3454,
"line": 16,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "repo_user can be None, that's no problem, we just need to make sure it isn't ``''`` (ie: empty string)",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228121",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3455,
"line": 21,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Could/Should we check the value of ``watch``?",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228145",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3456,
"line": 14,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Well this doesn't tell us much since the URL doesn't exists whether a ``foo`` project exists or not :)",
"commit": "9038eddfefdff0cf827a27ee38e05b8f638b2fe3",
"date_created": "1458228184",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3457,
"line": 8,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "It redirects you, but you're not checking the output, you should ``follow_redirects=True`` and see what's in the HTML. Did the ``Watch`` flag changed? Was there a message flashed? Did it redirect you to the right page?",
"commit": "9038eddfefdff0cf827a27ee38e05b8f638b2fe3",
"date_created": "1458228321",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3458,
"line": 46,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458237791",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3464,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been rebased",
"commit": None,
"date_created": "1458239574",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3465,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458240388",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3466,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Check the ConfirmationForm ;-)",
"commit": "95df7c1b30c8288333a2d5247ddbcc3df7abf2a0",
"date_created": "1458240462",
"edited_on": None,
"editor": None,
"filename": "pagure/forms.py",
"id": 3467,
"line": 5,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Check the login method, there is a way to check that the previous_url is sane",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240532",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3468,
"line": 11,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "repo cannot be None, otherwise we would have a 404 since the URL wouldn't hit here :)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240573",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3469,
"line": 16,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Let's make this ``str(watch)`` to be sure :)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240600",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3470,
"line": 19,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "No need for the ``if user``, just specify it, if ``user`` is None, it'll know what to do, check the code ;-)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240664",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3471,
"line": 25,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "``rollback()`` shouldn't be needed for a ``PagureException`` but it would for a sqlalchemy error",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240714",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3472,
"line": 38,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Split this string over two lines? (same below?)",
"commit": "3c839ba86dcd909f4bc23d2ab7d9bc87d4442315",
"date_created": "1458240771",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3473,
"line": 42,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458275019",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3477,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458275207",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3478,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458281234",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3479,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Whenever you get time just review this. I have taken so much time for this issue, so now thinking that if this will merge then I will move to the next issue.",
"commit": None,
"date_created": "1458298555",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3486,
"line": None,
"notification": False,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "This should be taken care of in ``notify.py`` no?",
"commit": "e9cd199e6d8e4e684b6d9b7440b7d8d3296699e4",
"date_created": "1458298756",
"edited_on": None,
"editor": None,
"filename": "pagure/lib/model.py",
"id": 3487,
"line": 35,
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "No that is for the user is watching or not for watch/Unwatch button.",
"commit": "e9cd199e6d8e4e684b6d9b7440b7d8d3296699e4",
"date_created": "1458298870",
"edited_on": None,
"editor": None,
"filename": "pagure/lib/model.py",
"id": 3488,
"line": 35,
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Let's be consistent about our URL schemas:\r\n\r\nI propose:\r\n\r\n @APP.route('/<repo>/settings/watch', methods=['POST'])\r\n @APP.route('/fork/<username>/<repo>/settings/watch', methods=['POST'])",
"commit": "2158eb3ee1cafc26cd2632244efa655dc42c224b",
"date_created": "1458298899",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3489,
"line": 13,
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Yes but we remove all the users not watching in ``notify.py`` if I read the code correctly",
"commit": "e9cd199e6d8e4e684b6d9b7440b7d8d3296699e4",
"date_created": "1458298950",
"edited_on": None,
"editor": None,
"filename": "pagure/lib/model.py",
"id": 3490,
"line": 35,
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "https://pagure.io/pagure/pull-request/843#_5,15 that is for this and It is not related to notify.",
"commit": "e9cd199e6d8e4e684b6d9b7440b7d8d3296699e4",
"date_created": "1458299121",
"edited_on": None,
"editor": None,
"filename": "pagure/lib/model.py",
"id": 3491,
"line": 35,
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been rebased",
"commit": None,
"date_created": "1458303598",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3492,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
}
],
"commit_start": "bbc08a4648aa96619823b1c7489c56d390b4934b",
"commit_stop": "26b54098ac3024d7f799a13ceec5d04c1c6c35f7",
"date_created": "1458226010",
"id": 843,
"project": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1449051695",
"description": "A git centered forge",
"id": 278,
"name": "pagure",
"parent": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [],
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
"status": "Open",
"title": "Added watch feature.",
"uid": "53e47f3c13874c9eb39675973e21e711",
"updated_on": "1458303597",
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
}
},
"msg_id": "2016-0c9940a9-ef5b-47fc-a2e1-57bff4ecf064",
"timestamp": 1458303598.0,
"topic": "io.pagure.prod.pagure.pull-request.comment.added"
},
{
"i": 2,
"msg": {
"agent": "aavrug",
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "master",
"closed_at": None,
"closed_by": None,
"comments": [
{
"comment": "Missing indentation for the content of the form :)",
"commit": "08d5e7567a6b1495be7d1da47ae8f5f5e0d079af",
"date_created": "1458228021",
"edited_on": None,
"editor": None,
"filename": "pagure/templates/repo_master.html",
"id": 3452,
"line": 13,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458228047",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3453,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Instead of having these in the form w/ hidden fields, maybe we could place them in the URL as we do in other places",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228080",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3454,
"line": 16,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "repo_user can be None, that's no problem, we just need to make sure it isn't ``''`` (ie: empty string)",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228121",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3455,
"line": 21,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Could/Should we check the value of ``watch``?",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228145",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3456,
"line": 14,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Well this doesn't tell us much since the URL doesn't exists whether a ``foo`` project exists or not :)",
"commit": "9038eddfefdff0cf827a27ee38e05b8f638b2fe3",
"date_created": "1458228184",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3457,
"line": 8,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "It redirects you, but you're not checking the output, you should ``follow_redirects=True`` and see what's in the HTML. Did the ``Watch`` flag changed? Was there a message flashed? Did it redirect you to the right page?",
"commit": "9038eddfefdff0cf827a27ee38e05b8f638b2fe3",
"date_created": "1458228321",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3458,
"line": 46,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458237791",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3464,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been rebased",
"commit": None,
"date_created": "1458239574",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3465,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458240388",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3466,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Check the ConfirmationForm ;-)",
"commit": "95df7c1b30c8288333a2d5247ddbcc3df7abf2a0",
"date_created": "1458240462",
"edited_on": None,
"editor": None,
"filename": "pagure/forms.py",
"id": 3467,
"line": 5,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Check the login method, there is a way to check that the previous_url is sane",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240532",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3468,
"line": 11,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "repo cannot be None, otherwise we would have a 404 since the URL wouldn't hit here :)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240573",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3469,
"line": 16,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Let's make this ``str(watch)`` to be sure :)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240600",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3470,
"line": 19,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "No need for the ``if user``, just specify it, if ``user`` is None, it'll know what to do, check the code ;-)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240664",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3471,
"line": 25,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "``rollback()`` shouldn't be needed for a ``PagureException`` but it would for a sqlalchemy error",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240714",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3472,
"line": 38,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Split this string over two lines? (same below?)",
"commit": "3c839ba86dcd909f4bc23d2ab7d9bc87d4442315",
"date_created": "1458240771",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3473,
"line": 42,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458275019",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3477,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458275207",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3478,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458281234",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3479,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Whenever you get time just review this. I have taken so much time for this issue, so now thinking that if this will merge then I will move to the next issue.",
"commit": None,
"date_created": "1458298555",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3486,
"line": None,
"notification": False,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "This should be taken care of in ``notify.py`` no?",
"commit": "e9cd199e6d8e4e684b6d9b7440b7d8d3296699e4",
"date_created": "1458298756",
"edited_on": None,
"editor": None,
"filename": "pagure/lib/model.py",
"id": 3487,
"line": 35,
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "No that is for the user is watching or not for watch/Unwatch button.",
"commit": "e9cd199e6d8e4e684b6d9b7440b7d8d3296699e4",
"date_created": "1458298870",
"edited_on": None,
"editor": None,
"filename": "pagure/lib/model.py",
"id": 3488,
"line": 35,
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Let's be consistent about our URL schemas:\r\n\r\nI propose:\r\n\r\n @APP.route('/<repo>/settings/watch', methods=['POST'])\r\n @APP.route('/fork/<username>/<repo>/settings/watch', methods=['POST'])",
"commit": "2158eb3ee1cafc26cd2632244efa655dc42c224b",
"date_created": "1458298899",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3489,
"line": 13,
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Yes but we remove all the users not watching in ``notify.py`` if I read the code correctly",
"commit": "e9cd199e6d8e4e684b6d9b7440b7d8d3296699e4",
"date_created": "1458298950",
"edited_on": None,
"editor": None,
"filename": "pagure/lib/model.py",
"id": 3490,
"line": 35,
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "https://pagure.io/pagure/pull-request/843#_5,15 that is for this and It is not related to notify.",
"commit": "e9cd199e6d8e4e684b6d9b7440b7d8d3296699e4",
"date_created": "1458299121",
"edited_on": None,
"editor": None,
"filename": "pagure/lib/model.py",
"id": 3491,
"line": "35",
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
}
],
"commit_start": "498572b754599022f358b370bf66fe5c8c10db58",
"commit_stop": "d3b1f21fed867629d541ec1c1537c65d06851d7d",
"date_created": "1458226010",
"id": 843,
"project": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1449051695",
"description": "A git centered forge",
"id": 278,
"name": "pagure",
"parent": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [],
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
"status": "Open",
"title": "Added watch feature.",
"uid": "53e47f3c13874c9eb39675973e21e711",
"updated_on": "1458298091",
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
}
},
"msg_id": "2016-7588d752-395c-408d-869a-967155cce0ab",
"timestamp": 1458299121.0,
"topic": "io.pagure.prod.pagure.pull-request.comment.added"
},
{
"i": 1,
"msg": {
"agent": "pingou",
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "master",
"closed_at": None,
"closed_by": None,
"comments": [
{
"comment": "Missing indentation for the content of the form :)",
"commit": "08d5e7567a6b1495be7d1da47ae8f5f5e0d079af",
"date_created": "1458228021",
"edited_on": None,
"editor": None,
"filename": "pagure/templates/repo_master.html",
"id": 3452,
"line": 13,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458228047",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3453,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Instead of having these in the form w/ hidden fields, maybe we could place them in the URL as we do in other places",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228080",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3454,
"line": 16,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "repo_user can be None, that's no problem, we just need to make sure it isn't ``''`` (ie: empty string)",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228121",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3455,
"line": 21,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Could/Should we check the value of ``watch``?",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228145",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3456,
"line": 14,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Well this doesn't tell us much since the URL doesn't exists whether a ``foo`` project exists or not :)",
"commit": "9038eddfefdff0cf827a27ee38e05b8f638b2fe3",
"date_created": "1458228184",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3457,
"line": 8,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "It redirects you, but you're not checking the output, you should ``follow_redirects=True`` and see what's in the HTML. Did the ``Watch`` flag changed? Was there a message flashed? Did it redirect you to the right page?",
"commit": "9038eddfefdff0cf827a27ee38e05b8f638b2fe3",
"date_created": "1458228321",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3458,
"line": 46,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458237791",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3464,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been rebased",
"commit": None,
"date_created": "1458239574",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3465,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458240388",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3466,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Check the ConfirmationForm ;-)",
"commit": "95df7c1b30c8288333a2d5247ddbcc3df7abf2a0",
"date_created": "1458240462",
"edited_on": None,
"editor": None,
"filename": "pagure/forms.py",
"id": 3467,
"line": 5,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Check the login method, there is a way to check that the previous_url is sane",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240532",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3468,
"line": 11,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "repo cannot be None, otherwise we would have a 404 since the URL wouldn't hit here :)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240573",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3469,
"line": 16,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Let's make this ``str(watch)`` to be sure :)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240600",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3470,
"line": 19,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "No need for the ``if user``, just specify it, if ``user`` is None, it'll know what to do, check the code ;-)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240664",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3471,
"line": 25,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "``rollback()`` shouldn't be needed for a ``PagureException`` but it would for a sqlalchemy error",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240714",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3472,
"line": 38,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Split this string over two lines? (same below?)",
"commit": "3c839ba86dcd909f4bc23d2ab7d9bc87d4442315",
"date_created": "1458240771",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3473,
"line": 42,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458275019",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3477,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458275207",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3478,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458281234",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3479,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Whenever you get time just review this. I have taken so much time for this issue, so now thinking that if this will merge then I will move to the next issue.",
"commit": None,
"date_created": "1458298555",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3486,
"line": None,
"notification": False,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "This should be taken care of in ``notify.py`` no?",
"commit": "e9cd199e6d8e4e684b6d9b7440b7d8d3296699e4",
"date_created": "1458298756",
"edited_on": None,
"editor": None,
"filename": "pagure/lib/model.py",
"id": 3487,
"line": 35,
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "No that is for the user is watching or not for watch/Unwatch button.",
"commit": "e9cd199e6d8e4e684b6d9b7440b7d8d3296699e4",
"date_created": "1458298870",
"edited_on": None,
"editor": None,
"filename": "pagure/lib/model.py",
"id": 3488,
"line": 35,
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Let's be consistent about our URL schemas:\r\n\r\nI propose:\r\n\r\n @APP.route('/<repo>/settings/watch', methods=['POST'])\r\n @APP.route('/fork/<username>/<repo>/settings/watch', methods=['POST'])",
"commit": "2158eb3ee1cafc26cd2632244efa655dc42c224b",
"date_created": "1458298899",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3489,
"line": 13,
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Yes but we remove all the users not watching in ``notify.py`` if I read the code correctly",
"commit": "e9cd199e6d8e4e684b6d9b7440b7d8d3296699e4",
"date_created": "1458298950",
"edited_on": None,
"editor": None,
"filename": "pagure/lib/model.py",
"id": 3490,
"line": "35",
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
}
],
"commit_start": "498572b754599022f358b370bf66fe5c8c10db58",
"commit_stop": "d3b1f21fed867629d541ec1c1537c65d06851d7d",
"date_created": "1458226010",
"id": 843,
"project": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1449051695",
"description": "A git centered forge",
"id": 278,
"name": "pagure",
"parent": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [],
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
"status": "Open",
"title": "Added watch feature.",
"uid": "53e47f3c13874c9eb39675973e21e711",
"updated_on": "1458298091",
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
}
},
"msg_id": "2016-d07f4b41-9b92-43f5-9dd9-013298bf0f50",
"timestamp": 1458298951.0,
"topic": "io.pagure.prod.pagure.pull-request.comment.added"
},
{
"i": 5,
"msg": {
"agent": "pingou",
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "master",
"closed_at": None,
"closed_by": None,
"comments": [
{
"comment": "Missing indentation for the content of the form :)",
"commit": "08d5e7567a6b1495be7d1da47ae8f5f5e0d079af",
"date_created": "1458228021",
"edited_on": None,
"editor": None,
"filename": "pagure/templates/repo_master.html",
"id": 3452,
"line": 13,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458228047",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3453,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Instead of having these in the form w/ hidden fields, maybe we could place them in the URL as we do in other places",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228080",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3454,
"line": 16,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "repo_user can be None, that's no problem, we just need to make sure it isn't ``''`` (ie: empty string)",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228121",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3455,
"line": 21,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Could/Should we check the value of ``watch``?",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228145",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3456,
"line": 14,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Well this doesn't tell us much since the URL doesn't exists whether a ``foo`` project exists or not :)",
"commit": "9038eddfefdff0cf827a27ee38e05b8f638b2fe3",
"date_created": "1458228184",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3457,
"line": 8,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "It redirects you, but you're not checking the output, you should ``follow_redirects=True`` and see what's in the HTML. Did the ``Watch`` flag changed? Was there a message flashed? Did it redirect you to the right page?",
"commit": "9038eddfefdff0cf827a27ee38e05b8f638b2fe3",
"date_created": "1458228321",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3458,
"line": 46,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458237791",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3464,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been rebased",
"commit": None,
"date_created": "1458239574",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3465,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458240388",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3466,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Check the ConfirmationForm ;-)",
"commit": "95df7c1b30c8288333a2d5247ddbcc3df7abf2a0",
"date_created": "1458240462",
"edited_on": None,
"editor": None,
"filename": "pagure/forms.py",
"id": 3467,
"line": 5,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Check the login method, there is a way to check that the previous_url is sane",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240532",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3468,
"line": 11,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "repo cannot be None, otherwise we would have a 404 since the URL wouldn't hit here :)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240573",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3469,
"line": 16,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Let's make this ``str(watch)`` to be sure :)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240600",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3470,
"line": 19,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "No need for the ``if user``, just specify it, if ``user`` is None, it'll know what to do, check the code ;-)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240664",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3471,
"line": 25,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "``rollback()`` shouldn't be needed for a ``PagureException`` but it would for a sqlalchemy error",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240714",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3472,
"line": 38,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Split this string over two lines? (same below?)",
"commit": "3c839ba86dcd909f4bc23d2ab7d9bc87d4442315",
"date_created": "1458240771",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3473,
"line": 42,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458275019",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3477,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458275207",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3478,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458281234",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3479,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Whenever you get time just review this. I have taken so much time for this issue, so now thinking that if this will merge then I will move to the next issue.",
"commit": None,
"date_created": "1458298555",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3486,
"line": None,
"notification": False,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "This should be taken care of in ``notify.py`` no?",
"commit": "e9cd199e6d8e4e684b6d9b7440b7d8d3296699e4",
"date_created": "1458298756",
"edited_on": None,
"editor": None,
"filename": "pagure/lib/model.py",
"id": 3487,
"line": 35,
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "No that is for the user is watching or not for watch/Unwatch button.",
"commit": "e9cd199e6d8e4e684b6d9b7440b7d8d3296699e4",
"date_created": "1458298870",
"edited_on": None,
"editor": None,
"filename": "pagure/lib/model.py",
"id": 3488,
"line": 35,
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Let's be consistent about our URL schemas:\r\n\r\nI propose:\r\n\r\n @APP.route('/<repo>/settings/watch', methods=['POST'])\r\n @APP.route('/fork/<username>/<repo>/settings/watch', methods=['POST'])",
"commit": "2158eb3ee1cafc26cd2632244efa655dc42c224b",
"date_created": "1458298899",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3489,
"line": "13",
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
}
],
"commit_start": "498572b754599022f358b370bf66fe5c8c10db58",
"commit_stop": "d3b1f21fed867629d541ec1c1537c65d06851d7d",
"date_created": "1458226010",
"id": 843,
"project": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1449051695",
"description": "A git centered forge",
"id": 278,
"name": "pagure",
"parent": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [],
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
"status": "Open",
"title": "Added watch feature.",
"uid": "53e47f3c13874c9eb39675973e21e711",
"updated_on": "1458298091",
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
}
},
"msg_id": "2016-fd909fc4-180a-4f4d-a68e-27411ebcad75",
"timestamp": 1458298899.0,
"topic": "io.pagure.prod.pagure.pull-request.comment.added"
},
{
"i": 1,
"msg": {
"agent": "aavrug",
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "master",
"closed_at": None,
"closed_by": None,
"comments": [
{
"comment": "Missing indentation for the content of the form :)",
"commit": "08d5e7567a6b1495be7d1da47ae8f5f5e0d079af",
"date_created": "1458228021",
"edited_on": None,
"editor": None,
"filename": "pagure/templates/repo_master.html",
"id": 3452,
"line": 13,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458228047",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3453,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Instead of having these in the form w/ hidden fields, maybe we could place them in the URL as we do in other places",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228080",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3454,
"line": 16,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "repo_user can be None, that's no problem, we just need to make sure it isn't ``''`` (ie: empty string)",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228121",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3455,
"line": 21,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Could/Should we check the value of ``watch``?",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228145",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3456,
"line": 14,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Well this doesn't tell us much since the URL doesn't exists whether a ``foo`` project exists or not :)",
"commit": "9038eddfefdff0cf827a27ee38e05b8f638b2fe3",
"date_created": "1458228184",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3457,
"line": 8,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "It redirects you, but you're not checking the output, you should ``follow_redirects=True`` and see what's in the HTML. Did the ``Watch`` flag changed? Was there a message flashed? Did it redirect you to the right page?",
"commit": "9038eddfefdff0cf827a27ee38e05b8f638b2fe3",
"date_created": "1458228321",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3458,
"line": 46,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458237791",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3464,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been rebased",
"commit": None,
"date_created": "1458239574",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3465,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458240388",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3466,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Check the ConfirmationForm ;-)",
"commit": "95df7c1b30c8288333a2d5247ddbcc3df7abf2a0",
"date_created": "1458240462",
"edited_on": None,
"editor": None,
"filename": "pagure/forms.py",
"id": 3467,
"line": 5,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Check the login method, there is a way to check that the previous_url is sane",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240532",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3468,
"line": 11,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "repo cannot be None, otherwise we would have a 404 since the URL wouldn't hit here :)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240573",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3469,
"line": 16,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Let's make this ``str(watch)`` to be sure :)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240600",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3470,
"line": 19,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "No need for the ``if user``, just specify it, if ``user`` is None, it'll know what to do, check the code ;-)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240664",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3471,
"line": 25,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "``rollback()`` shouldn't be needed for a ``PagureException`` but it would for a sqlalchemy error",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240714",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3472,
"line": 38,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Split this string over two lines? (same below?)",
"commit": "3c839ba86dcd909f4bc23d2ab7d9bc87d4442315",
"date_created": "1458240771",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3473,
"line": 42,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458275019",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3477,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458275207",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3478,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458281234",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3479,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Whenever you get time just review this. I have taken so much time for this issue, so now thinking that if this will merge then I will move to the next issue.",
"commit": None,
"date_created": "1458298555",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3486,
"line": None,
"notification": False,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "This should be taken care of in ``notify.py`` no?",
"commit": "e9cd199e6d8e4e684b6d9b7440b7d8d3296699e4",
"date_created": "1458298756",
"edited_on": None,
"editor": None,
"filename": "pagure/lib/model.py",
"id": 3487,
"line": 35,
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "No that is for the user is watching or not for watch/Unwatch button.",
"commit": "e9cd199e6d8e4e684b6d9b7440b7d8d3296699e4",
"date_created": "1458298870",
"edited_on": None,
"editor": None,
"filename": "pagure/lib/model.py",
"id": 3488,
"line": "35",
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
}
],
"commit_start": "498572b754599022f358b370bf66fe5c8c10db58",
"commit_stop": "d3b1f21fed867629d541ec1c1537c65d06851d7d",
"date_created": "1458226010",
"id": 843,
"project": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1449051695",
"description": "A git centered forge",
"id": 278,
"name": "pagure",
"parent": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [],
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
"status": "Open",
"title": "Added watch feature.",
"uid": "53e47f3c13874c9eb39675973e21e711",
"updated_on": "1458298091",
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
}
},
"msg_id": "2016-9e63a009-4e66-480c-8eca-ab032cff462b",
"timestamp": 1458298872.0,
"topic": "io.pagure.prod.pagure.pull-request.comment.added"
},
{
"i": 3,
"msg": {
"agent": "pingou",
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "master",
"closed_at": None,
"closed_by": None,
"comments": [
{
"comment": "Missing indentation for the content of the form :)",
"commit": "08d5e7567a6b1495be7d1da47ae8f5f5e0d079af",
"date_created": "1458228021",
"edited_on": None,
"editor": None,
"filename": "pagure/templates/repo_master.html",
"id": 3452,
"line": 13,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458228047",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3453,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Instead of having these in the form w/ hidden fields, maybe we could place them in the URL as we do in other places",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228080",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3454,
"line": 16,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "repo_user can be None, that's no problem, we just need to make sure it isn't ``''`` (ie: empty string)",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228121",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3455,
"line": 21,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Could/Should we check the value of ``watch``?",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228145",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3456,
"line": 14,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Well this doesn't tell us much since the URL doesn't exists whether a ``foo`` project exists or not :)",
"commit": "9038eddfefdff0cf827a27ee38e05b8f638b2fe3",
"date_created": "1458228184",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3457,
"line": 8,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "It redirects you, but you're not checking the output, you should ``follow_redirects=True`` and see what's in the HTML. Did the ``Watch`` flag changed? Was there a message flashed? Did it redirect you to the right page?",
"commit": "9038eddfefdff0cf827a27ee38e05b8f638b2fe3",
"date_created": "1458228321",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3458,
"line": 46,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458237791",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3464,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been rebased",
"commit": None,
"date_created": "1458239574",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3465,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458240388",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3466,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Check the ConfirmationForm ;-)",
"commit": "95df7c1b30c8288333a2d5247ddbcc3df7abf2a0",
"date_created": "1458240462",
"edited_on": None,
"editor": None,
"filename": "pagure/forms.py",
"id": 3467,
"line": 5,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Check the login method, there is a way to check that the previous_url is sane",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240532",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3468,
"line": 11,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "repo cannot be None, otherwise we would have a 404 since the URL wouldn't hit here :)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240573",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3469,
"line": 16,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Let's make this ``str(watch)`` to be sure :)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240600",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3470,
"line": 19,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "No need for the ``if user``, just specify it, if ``user`` is None, it'll know what to do, check the code ;-)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240664",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3471,
"line": 25,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "``rollback()`` shouldn't be needed for a ``PagureException`` but it would for a sqlalchemy error",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240714",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3472,
"line": 38,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Split this string over two lines? (same below?)",
"commit": "3c839ba86dcd909f4bc23d2ab7d9bc87d4442315",
"date_created": "1458240771",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3473,
"line": 42,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458275019",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3477,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458275207",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3478,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458281234",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3479,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Whenever you get time just review this. I have taken so much time for this issue, so now thinking that if this will merge then I will move to the next issue.",
"commit": None,
"date_created": "1458298555",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3486,
"line": None,
"notification": False,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "This should be taken care of in ``notify.py`` no?",
"commit": "e9cd199e6d8e4e684b6d9b7440b7d8d3296699e4",
"date_created": "1458298756",
"edited_on": None,
"editor": None,
"filename": "pagure/lib/model.py",
"id": 3487,
"line": "35",
"notification": False,
"parent": None,
"tree": "00a6f09a5e31549784c6f61c48723942e7c108cf",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
}
],
"commit_start": "498572b754599022f358b370bf66fe5c8c10db58",
"commit_stop": "d3b1f21fed867629d541ec1c1537c65d06851d7d",
"date_created": "1458226010",
"id": 843,
"project": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1449051695",
"description": "A git centered forge",
"id": 278,
"name": "pagure",
"parent": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [],
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
"status": "Open",
"title": "Added watch feature.",
"uid": "53e47f3c13874c9eb39675973e21e711",
"updated_on": "1458298091",
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
}
},
"msg_id": "2016-b60b2056-4228-404e-bae0-92b194a81fe9",
"timestamp": 1458298757.0,
"topic": "io.pagure.prod.pagure.pull-request.comment.added"
},
{
"i": 4,
"msg": {
"agent": "aavrug",
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "master",
"closed_at": None,
"closed_by": None,
"comments": [
{
"comment": "Missing indentation for the content of the form :)",
"commit": "08d5e7567a6b1495be7d1da47ae8f5f5e0d079af",
"date_created": "1458228021",
"edited_on": None,
"editor": None,
"filename": "pagure/templates/repo_master.html",
"id": 3452,
"line": 13,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458228047",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3453,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Instead of having these in the form w/ hidden fields, maybe we could place them in the URL as we do in other places",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228080",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3454,
"line": 16,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "repo_user can be None, that's no problem, we just need to make sure it isn't ``''`` (ie: empty string)",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228121",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3455,
"line": 21,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Could/Should we check the value of ``watch``?",
"commit": "7ed981c2ce9e66f1637cba224455d45c611b660a",
"date_created": "1458228145",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3456,
"line": 14,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Well this doesn't tell us much since the URL doesn't exists whether a ``foo`` project exists or not :)",
"commit": "9038eddfefdff0cf827a27ee38e05b8f638b2fe3",
"date_created": "1458228184",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3457,
"line": 8,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "It redirects you, but you're not checking the output, you should ``follow_redirects=True`` and see what's in the HTML. Did the ``Watch`` flag changed? Was there a message flashed? Did it redirect you to the right page?",
"commit": "9038eddfefdff0cf827a27ee38e05b8f638b2fe3",
"date_created": "1458228321",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3458,
"line": 46,
"notification": False,
"parent": None,
"tree": "a02e041005632948452b5e55b7257543cb496dbb",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458237791",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3464,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been rebased",
"commit": None,
"date_created": "1458239574",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3465,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458240388",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3466,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Check the ConfirmationForm ;-)",
"commit": "95df7c1b30c8288333a2d5247ddbcc3df7abf2a0",
"date_created": "1458240462",
"edited_on": None,
"editor": None,
"filename": "pagure/forms.py",
"id": 3467,
"line": 5,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Check the login method, there is a way to check that the previous_url is sane",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240532",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3468,
"line": 11,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "repo cannot be None, otherwise we would have a 404 since the URL wouldn't hit here :)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240573",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3469,
"line": 16,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Let's make this ``str(watch)`` to be sure :)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240600",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3470,
"line": 19,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "No need for the ``if user``, just specify it, if ``user`` is None, it'll know what to do, check the code ;-)",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240664",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3471,
"line": 25,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "``rollback()`` shouldn't be needed for a ``PagureException`` but it would for a sqlalchemy error",
"commit": "057e1ff0685bfdfd71f7525663c6b7da2ce11373",
"date_created": "1458240714",
"edited_on": None,
"editor": None,
"filename": "pagure/ui/repo.py",
"id": 3472,
"line": 38,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Split this string over two lines? (same below?)",
"commit": "3c839ba86dcd909f4bc23d2ab7d9bc87d4442315",
"date_created": "1458240771",
"edited_on": None,
"editor": None,
"filename": "tests/test_pagure_flask_ui_repo.py",
"id": 3473,
"line": 42,
"notification": False,
"parent": None,
"tree": "389966f3da0423bc76d6198e7283266d47a28c1c",
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458275019",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3477,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458275207",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3478,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Pull-Request has been updated",
"commit": None,
"date_created": "1458281234",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3479,
"line": None,
"notification": True,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
{
"comment": "Whenever you get time just review this. I have taken so much time for this issue, so now thinking that if this will merge then I will move to the next issue.",
"commit": None,
"date_created": "1458298555",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3486,
"line": None,
"notification": False,
"parent": None,
"tree": None,
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
}
],
"commit_start": "498572b754599022f358b370bf66fe5c8c10db58",
"commit_stop": "d3b1f21fed867629d541ec1c1537c65d06851d7d",
"date_created": "1458226010",
"id": 843,
"project": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1449051695",
"description": "A git centered forge",
"id": 278,
"name": "pagure",
"parent": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [],
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
},
"status": "Open",
"title": "Added watch feature.",
"uid": "53e47f3c13874c9eb39675973e21e711",
"updated_on": "1458298091",
"user": {
"fullname": "Gaurav Kumar",
"name": "aavrug"
}
}
},
"msg_id": "2016-78a49276-f52d-45f1-aacb-6f9783338a2d",
"timestamp": 1458298555.0,
"topic": "io.pagure.prod.pagure.pull-request.comment.added"
},
{
"i": 3,
"msg": {
"agent": "pingou",
"issue": {
"assignee": None,
"blocks": [],
"comments": [
{
"comment": "Hmmm, i can reproduce this one on pagure.io, but not in my local instance. It may have been fixed . Or there is some config difference between the two :(\r\n\r\n",
"date_created": "1458032385",
"edited_on": None,
"editor": None,
"id": 1950,
"parent": None,
"user": {
"fullname": "ryan lerch",
"name": "ryanlerch"
}
},
{
"comment": "I can't replicate in my local instance either. I think the difference in configuration is that locally the form gets submitted with regular post and redirect, so the page reloads and form clears. On pagure.io, the comments are submitted without page reload.",
"date_created": "1458295390",
"edited_on": None,
"editor": None,
"id": 2034,
"parent": None,
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
},
{
"comment": "You can run the SSE server by using\r\n\r\n PAGURE_CONFIG=../config PYTHONPATH=. python ev-server/pagure-stream-server.py\r\n",
"date_created": "1458297874",
"edited_on": None,
"editor": None,
"id": 2035,
"parent": None,
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "I found the bug and the fix :)",
"date_created": "1458298137",
"edited_on": None,
"editor": None,
"id": 2036,
"parent": None,
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
}
],
"content": "Steps to reproduce:\r\n\r\n1. On issue page, write some text in comment input.\r\n2. Display preview\r\n3. Hit *Update issue* button\r\n\r\nThe comment gets added, but the preview is still displayed the exact same way. I would expect the view to switch back to clear textarea.",
"date_created": "1458031264",
"depends": [],
"id": 833,
"private": False,
"status": "Open",
"tags": [],
"title": "Submitting issue comment does not clear preview",
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
},
"project": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
}
},
"msg_id": "2016-59add39c-492c-4cbe-a5ff-94951c626733",
"timestamp": 1458298137.0,
"topic": "io.pagure.prod.pagure.issue.comment.added"
},
{
"i": 4,
"msg": {
"agent": "pingou",
"issue": {
"assignee": None,
"blocks": [],
"comments": [
{
"comment": "Hmmm, i can reproduce this one on pagure.io, but not in my local instance. It may have been fixed . Or there is some config difference between the two :(\r\n\r\n",
"date_created": "1458032385",
"edited_on": None,
"editor": None,
"id": 1950,
"parent": None,
"user": {
"fullname": "ryan lerch",
"name": "ryanlerch"
}
},
{
"comment": "I can't replicate in my local instance either. I think the difference in configuration is that locally the form gets submitted with regular post and redirect, so the page reloads and form clears. On pagure.io, the comments are submitted without page reload.",
"date_created": "1458295390",
"edited_on": None,
"editor": None,
"id": 2034,
"parent": None,
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
},
{
"comment": "You can run the SSE server by using\r\n\r\n PAGURE_CONFIG=../config PYTHONPATH=. python ev-server/pagure-stream-server.py\r\n",
"date_created": "1458297874",
"edited_on": None,
"editor": None,
"id": 2035,
"parent": None,
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
}
],
"content": "Steps to reproduce:\r\n\r\n1. On issue page, write some text in comment input.\r\n2. Display preview\r\n3. Hit *Update issue* button\r\n\r\nThe comment gets added, but the preview is still displayed the exact same way. I would expect the view to switch back to clear textarea.",
"date_created": "1458031264",
"depends": [],
"id": 833,
"private": False,
"status": "Open",
"tags": [],
"title": "Submitting issue comment does not clear preview",
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
},
"project": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
}
},
"msg_id": "2016-c8ff3789-462f-4346-abd5-cac4761933d1",
"timestamp": 1458297874.0,
"topic": "io.pagure.prod.pagure.issue.comment.added"
},
{
"i": 2,
"msg": {
"agent": "pingou",
"pullrequest": {
"assignee": None,
"branch": "master",
"branch_from": "scroll-to-highlighted",
"closed_at": "1458297187",
"closed_by": None,
"comments": [
{
"comment": "Visit [a page with line range highlighted](https://pagure.io/pagure/blob/master/f/pagure/__init__.py#_93-98), and the browser will not scroll to it automatically. This patch should fix that.\r\n\r\nIf you don't like the animation, replace that line with `window.scroll(0, offset);`.",
"commit": None,
"date_created": "1458294982",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3482,
"line": None,
"notification": False,
"parent": None,
"tree": None,
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
},
{
"comment": "I like the idea but it seems it doesn't work for me if the lines selected are not part of the first file (in my local test I selected lines in the 5th files).",
"commit": None,
"date_created": "1458297107",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3483,
"line": None,
"notification": False,
"parent": None,
"tree": None,
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
{
"comment": "Nevermind I tested in on a PR where I think we should also add it, but this is for viewing a file and works like a charm :)\r\n\r\nThanks!",
"commit": None,
"date_created": "1458297179",
"edited_on": None,
"editor": None,
"filename": None,
"id": 3484,
"line": None,
"notification": False,
"parent": None,
"tree": None,
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
}
],
"commit_start": "0b01c0facd70179e17762d0f2a053280cae94427",
"commit_stop": "0b01c0facd70179e17762d0f2a053280cae94427",
"date_created": "1458294982",
"id": 848,
"project": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"remote_git": None,
"repo_from": {
"date_created": "1450686367",
"description": "A git centered forge",
"id": 293,
"name": "pagure",
"parent": {
"date_created": "1431549490",
"description": "A git centered forge",
"id": 10,
"name": "pagure",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"pagure",
"fedmsg",
"fedora-infra"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
}
},
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": False,
"project_documentation": True,
"pull_requests": False
},
"tags": [],
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
},
"status": "Merged",
"title": "Automatically scroll to highlighted range",
"uid": "59e41f825a9b42dfa6268c20d53c9f44",
"updated_on": "1458297185",
"user": {
"fullname": "Lubomír Sedlář",
"name": "lsedlar"
}
}
},
"msg_id": "2016-3f10a96f-0d00-47fd-95c1-7b78b8825f9d",
"timestamp": 1458297187.0,
"topic": "io.pagure.prod.pagure.pull-request.comment.added"
}
]
class TestPagureConglomeratorByOldStyleCommit(
fedmsg.tests.test_meta.ConglomerateBase):
expected = [
{
'categories': set(['pagure']),
'end_time': 1458324396.0,
'human_time': arrow.get(1458324396).humanize(),
'icon': 'https://apps.fedoraproject.org/packages/images/icons/package_128x128.png',
'link': 'https://pagure.io/fedora-hubs/3704095da807fc94f6ceff2f9d3c8d1de4888c22',
'packages': set([]),
'secondary_icon': None,
'start_time': 1458324396.0,
'subjective': u'rbean@redhat.com pushed to fedora-hubs (develop). "We need this to match our locally-stored datanommer topics so that the feed works."',
'subtitle': u'rbean@redhat.com pushed to fedora-hubs (develop). "We need this to match our locally-stored datanommer topics so that the feed works."',
'timestamp': 1458324396.0,
'topics': set(['io.pagure.prod.pagure.git.receive']),
'usernames': set(['rbean@redhat.com'])
}, {
'categories': set(['pagure']),
'end_time': 1458298728.0,
'human_time': arrow.get(1458292032).humanize(),
'icon': 'https://apps.fedoraproject.org/packages/images/icons/package_128x128.png',
'link': 'https://pagure.io/fedora-websites/commits',
'packages': set([]),
'secondary_icon': 'https://seccdn.libravatar.org/avatar/f507d9bd18d7298a62d4efd485ce9136dea6145d728077d74f349d8b8bb02605?s=64&d=retro',
'start_time': 1458288685.0,
'subjective': 'robyduck pushed 3 commits to the fedora-websites project',
'subtitle': 'robyduck pushed 3 commits to the fedora-websites project',
'timestamp': 1458292032.6666667,
'topics': set(['io.pagure.prod.pagure.git.receive']),
'usernames': set(['robyduck'])
}, {
'categories': set(['pagure']),
'end_time': 1458294681.0,
'human_time': arrow.get(1458294681).humanize(),
'icon': 'https://apps.fedoraproject.org/packages/images/icons/package_128x128.png',
'link': 'https://pagure.io/fedoramagazine-images/d71918d84ae82565837e9eaa8c42954c754fef4c',
'packages': set([]),
'secondary_icon': None,
'start_time': 1458294681.0,
'subjective': u'rlerch@redhat.com pushed to fedoramagazine-images (openvpn). "added openVPN image"',
'subtitle': u'rlerch@redhat.com pushed to fedoramagazine-images (openvpn). "added openVPN image"',
'timestamp': 1458294681.0,
'topics': set(['io.pagure.prod.pagure.git.receive']),
'usernames': set(['rlerch@redhat.com'])
}
]
originals = [
{
"i": 1,
"msg": {
"commit": {
"agent": "git",
"branch": "refs/heads/develop",
"email": "rbean@redhat.com",
"message": "We need this to match our locally-stored datanommer topics so that the feed works.",
"name": "Ralph Bean",
"path": "/srv/git/repositories/fedora-hubs.git",
"repo": {
"date_created": "1433438868",
"description": "Fedora Hubs",
"id": 50,
"name": "fedora-hubs",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": False,
"pull_requests": True
},
"tags": [
"fedora-infra"
],
"user": {
"fullname": "Remy DeCausemaker",
"name": "decause"
}
},
"rev": "3704095da807fc94f6ceff2f9d3c8d1de4888c22",
"seen": False,
"stats": {
"files": {
"fedmsg.d/base.py": {
"additions": 1,
"deletions": 1,
"lines": 2
}
},
"total": {
"additions": 1,
"deletions": 1,
"files": 1,
"lines": 2
}
},
"summary": "We need this to match our locally-stored datanommer topics so that the feed works.",
"username": None
}
},
"msg_id": "2016-9e3e2b6e-3155-4785-b6eb-7f493d1f46af",
"timestamp": 1458324396.0,
"topic": "io.pagure.prod.pagure.git.receive"
},
{
"i": 1,
"msg": {
"commit": {
"agent": "git",
"branch": "refs/heads/master",
"email": "robyduck@fedoraproject.org",
"message": "budget numbers title needs a hyperlink",
"name": "Robert Mayr",
"path": "/srv/git/repositories/fedora-websites.git",
"repo": {
"date_created": "1456583771",
"description": "Fedora Websites",
"id": 375,
"name": "fedora-websites",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"fedmsg",
"fedora-app"
],
"user": {
"fullname": "Robert Mayr",
"name": "robyduck"
}
},
"rev": "c1cdc52d08cb51b6f4d9f6fd5e34759590f8a9a4",
"seen": False,
"stats": {
"files": {
"budget.fedoraproject.org/data/content/index.html": {
"additions": 1,
"deletions": 1,
"lines": 2
}
},
"total": {
"additions": 1,
"deletions": 1,
"files": 1,
"lines": 2
}
},
"summary": "budget numbers title needs a hyperlink",
"username": None
}
},
"msg_id": "2016-78f3efd4-7842-4488-9899-7f23f1037aa6",
"timestamp": 1458298728.0,
"topic": "io.pagure.prod.pagure.git.receive"
},
{
"i": 1,
"msg": {
"commit": {
"agent": "git",
"branch": "refs/heads/openvpn",
"email": "rlerch@redhat.com",
"message": "added openVPN image",
"name": "Ryan Lerch",
"path": "/srv/git/repositories/fedoramagazine-images.git",
"repo": {
"date_created": "1440421477",
"description": "Cover Images for the Fedora Magazine",
"id": 147,
"name": "fedoramagazine-images",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"SVG",
"magazine",
"assets"
],
"user": {
"fullname": "ryan lerch",
"name": "ryanlerch"
}
},
"rev": "d71918d84ae82565837e9eaa8c42954c754fef4c",
"seen": False,
"stats": {
"files": {
"images/openvpn.svg": {
"additions": 1007,
"deletions": 0,
"lines": 1007
}
},
"total": {
"additions": 1007,
"deletions": 0,
"files": 1,
"lines": 1007
}
},
"summary": "added openVPN image",
"username": None
}
},
"msg_id": "2016-44a40335-b644-409e-ac1b-3bf4b421109e",
"timestamp": 1458294681.0,
"topic": "io.pagure.prod.pagure.git.receive"
},
{
"i": 1,
"msg": {
"commit": {
"agent": "git",
"branch": "refs/heads/master",
"email": "robyduck@fedoraproject.org",
"message": "fix anchor id for budget numbers",
"name": "Robert Mayr",
"path": "/srv/git/repositories/fedora-websites.git",
"repo": {
"date_created": "1456583771",
"description": "Fedora Websites",
"id": 375,
"name": "fedora-websites",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"fedmsg",
"fedora-app"
],
"user": {
"fullname": "Robert Mayr",
"name": "robyduck"
}
},
"rev": "ec1d633b89a4dfdc05a09bd01b3a84cbe6aecbf7",
"seen": False,
"stats": {
"files": {
"budget.fedoraproject.org/data/content/index.html": {
"additions": 1,
"deletions": 1,
"lines": 2
}
},
"total": {
"additions": 1,
"deletions": 1,
"files": 1,
"lines": 2
}
},
"summary": "fix anchor id for budget numbers",
"username": None
}
},
"msg_id": "2016-9ad5642e-b9d9-49fc-9d0a-0f9e22b34158",
"timestamp": 1458288685.0,
"topic": "io.pagure.prod.pagure.git.receive"
},
{
"i": 2,
"msg": {
"commit": {
"agent": "git",
"branch": "refs/heads/master",
"email": "robyduck@fedoraproject.org",
"message": "fix genshi markups for regional delegate titles",
"name": "Robert Mayr",
"path": "/srv/git/repositories/fedora-websites.git",
"repo": {
"date_created": "1456583771",
"description": "Fedora Websites",
"id": 375,
"name": "fedora-websites",
"parent": None,
"settings": {
"Enforce_signed-off_commits_in_pull-request": False,
"Minimum_score_to_merge_pull-request": -1,
"Only_assignee_can_merge_pull-request": False,
"Web-hooks": None,
"always_merge": False,
"issue_tracker": True,
"project_documentation": True,
"pull_requests": True
},
"tags": [
"fedmsg",
"fedora-app"
],
"user": {
"fullname": "Robert Mayr",
"name": "robyduck"
}
},
"rev": "e8523c28ea57af1a58f2e5aa4180338a5d5a6bd8",
"seen": False,
"stats": {
"files": {
"budget.fedoraproject.org/data/content/index.html": {
"additions": 4,
"deletions": 4,
"lines": 8
}
},
"total": {
"additions": 4,
"deletions": 4,
"files": 1,
"lines": 8
}
},
"summary": "fix genshi markups for regional delegate titles",
"username": None
}
},
"msg_id": "2016-e6dd6d1f-b013-4fe3-aa2b-09f275ada088",
"timestamp": 1458288685.0,
"topic": "io.pagure.prod.pagure.git.receive"
}
]
class TestPagureConglomeratorByNewStyleCommit(
fedmsg.tests.test_meta.ConglomerateBase):
expected = [{
'categories': set(['pagure']),
'end_time': 1457538778,
'human_time': arrow.get(1457538778).humanize(),
'icon': 'https://apps.fedoraproject.org/packages/images/icons/package_128x128.png',
'link': 'https://pagure.io/pagure/commits',
'packages': set([]),
'secondary_icon': 'https://seccdn.libravatar.org/avatar/01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c?s=64&d=retro',
'start_time': 1457538778,
'subjective': 'pingou pushed 5 commits to pagure (feature and master)',
'subtitle': 'pingou pushed 5 commits to pagure (feature and master)',
'timestamp': 1457538778,
'topics': set(['io.pagure.prod.pagure.git.receive']),
'usernames': set(['pingou']),
}]
originals = [{
"username": "pingou",
"i": 1,
"timestamp": 1457538778,
"msg_id": "2016-c854f690-5691-42e8-b488-2d65aef80fdc",
"topic": "io.pagure.prod.pagure.git.receive",
"msg": {
"forced": False,
"agent": "pingou",
"repo": {
"description": "test project #1",
"parent": None,
"settings": {
"Minimum_score_to_merge_pull-request": -1,
"Web-hooks": None,
"project_documentation": False,
"always_merge": True,
"pull_requests": True,
"Enforce_signed-off_commits_in_pull-request": False,
"Comment-editing": False,
"Only_assignee_can_merge_pull-request": False,
"issue_tracker": True
},
"tags": [
"fedora-infra",
"fedora"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
},
"date_created": "1426500194",
"id": 1,
"name": "pagure"
},
"end_commit": "edc02fbb423d3957d174c571896418f29fa169b8",
"branch": "refs/heads/master",
"total_commits": 3,
"start_commit": "b5e65479e4bd91554d8d3084bf378ffb6e4ef605"
}
}, {
"username": "pingou",
"i": 1,
"timestamp": 1457538778,
"msg_id": "2016-c854f690-5691-42e8-b488-2d65aef80fdc",
"topic": "io.pagure.prod.pagure.git.receive",
"msg": {
"forced": False,
"agent": "pingou",
"repo": {
"description": "test project #1",
"parent": None,
"settings": {
"Minimum_score_to_merge_pull-request": -1,
"Web-hooks": None,
"project_documentation": False,
"always_merge": True,
"pull_requests": True,
"Enforce_signed-off_commits_in_pull-request": False,
"Comment-editing": False,
"Only_assignee_can_merge_pull-request": False,
"issue_tracker": True
},
"tags": [
"fedora-infra",
"fedora"
],
"user": {
"fullname": "Pierre-YvesChibon",
"name": "pingou"
},
"date_created": "1426500194",
"id": 1,
"name": "pagure"
},
"end_commit": "edc02fbb423d3957d174c571896418f29fa169b8",
"branch": "refs/heads/feature",
"total_commits": 2,
"start_commit": "b5e65479e4bd91554d8d3084bf378ffb6e4ef605"
}
}]
|
fedora-infra/fedmsg_meta_fedora_infrastructure
|
fedmsg_meta_fedora_infrastructure/tests/conglomerate/pagure/test_pagure.py
|
Python
|
lgpl-2.1
| 218,531
|
[
"VisIt"
] |
e26ea89d957a068c29dbad9f5ed93a61d4c643796d9d8a47e017d0c6db727313
|
""" Runs few integrity checks
"""
__RCSID__ = "$Id$"
import re
import ast
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.DataManagementSystem.Client.DataIntegrityClient import DataIntegrityClient
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
AGENT_NAME = 'Transformation/ValidateOutputDataAgent'
class ValidateOutputDataAgent( AgentModule ):
def __init__( self, *args, **kwargs ):
""" c'tor
"""
AgentModule.__init__( self, *args, **kwargs )
self.integrityClient = DataIntegrityClient()
self.fc = FileCatalog()
self.transClient = TransformationClient()
self.fileCatalogClient = FileCatalogClient()
agentTSTypes = self.am_getOption( 'TransformationTypes', [] )
if agentTSTypes:
self.transformationTypes = agentTSTypes
else:
self.transformationTypes = Operations().getValue( 'Transformations/DataProcessing', ['MCSimulation', 'Merge'] )
self.directoryLocations = sorted( self.am_getOption( 'DirectoryLocations', ['TransformationDB',
'MetadataCatalog'] ) )
self.activeStorages = sorted( self.am_getOption( 'ActiveSEs', [] ) )
self.transfidmeta = self.am_getOption( 'TransfIDMeta', "TransformationID" )
self.enableFlag = True
#############################################################################
def initialize( self ):
""" Sets defaults
"""
gLogger.info( "Will treat the following transformation types: %s" % str( self.transformationTypes ) )
gLogger.info( "Will search for directories in the following locations: %s" % str( self.directoryLocations ) )
gLogger.info( "Will check the following storage elements: %s" % str( self.activeStorages ) )
gLogger.info( "Will use %s as metadata tag name for TransformationID" % self.transfidmeta )
return S_OK()
#############################################################################
def execute( self ):
""" The VerifyOutputData execution method
"""
self.enableFlag = self.am_getOption( 'EnableFlag', 'True' )
if not self.enableFlag == 'True':
self.log.info( "VerifyOutputData is disabled by configuration option 'EnableFlag'" )
return S_OK( 'Disabled via CS flag' )
gLogger.info( "-" * 40 )
self.updateWaitingIntegrity()
gLogger.info( "-" * 40 )
res = self.transClient.getTransformations( {'Status':'ValidatingOutput', 'Type':self.transformationTypes} )
if not res['OK']:
gLogger.error( "Failed to get ValidatingOutput transformations", res['Message'] )
return res
transDicts = res['Value']
if not transDicts:
gLogger.info( "No transformations found in ValidatingOutput status" )
return S_OK()
gLogger.info( "Found %s transformations in ValidatingOutput status" % len( transDicts ) )
for transDict in transDicts:
transID = transDict['TransformationID']
res = self.checkTransformationIntegrity( int( transID ) )
if not res['OK']:
gLogger.error( "Failed to perform full integrity check for transformation %d" % transID )
else:
self.finalizeCheck( transID )
gLogger.info( "-" * 40 )
return S_OK()
def updateWaitingIntegrity( self ):
""" Get 'WaitingIntegrity' transformations, update to 'ValidatedOutput'
"""
gLogger.info( "Looking for transformations in the WaitingIntegrity status to update" )
res = self.transClient.getTransformations( {'Status':'WaitingIntegrity'} )
if not res['OK']:
gLogger.error( "Failed to get WaitingIntegrity transformations", res['Message'] )
return res
transDicts = res['Value']
if not transDicts:
gLogger.info( "No transformations found in WaitingIntegrity status" )
return S_OK()
gLogger.info( "Found %s transformations in WaitingIntegrity status" % len( transDicts ) )
for transDict in transDicts:
transID = transDict['TransformationID']
gLogger.info( "-" * 40 )
res = self.integrityClient.getTransformationProblematics( int( transID ) )
if not res['OK']:
gLogger.error( "Failed to determine waiting problematics for transformation", res['Message'] )
elif not res['Value']:
res = self.transClient.setTransformationParameter( transID, 'Status', 'ValidatedOutput' )
if not res['OK']:
gLogger.error( "Failed to update status of transformation %s to ValidatedOutput" % ( transID ) )
else:
gLogger.info( "Updated status of transformation %s to ValidatedOutput" % ( transID ) )
else:
gLogger.info( "%d problematic files for transformation %s were found" % ( len( res['Value'] ), transID ) )
return
#############################################################################
#
# Get the transformation directories for checking
#
def getTransformationDirectories( self, transID ):
""" Get the directories for the supplied transformation from the transformation system
"""
directories = []
if 'TransformationDB' in self.directoryLocations:
res = self.transClient.getTransformationParameters( transID, ['OutputDirectories'] )
if not res['OK']:
gLogger.error( "Failed to obtain transformation directories", res['Message'] )
return res
if not isinstance( res['Value'], list ):
transDirectories = ast.literal_eval( res['Value'] )
else:
transDirectories = res['Value']
directories = self._addDirs( transID, transDirectories, directories )
if 'MetadataCatalog' in self.directoryLocations:
res = self.fileCatalogClient.findDirectoriesByMetadata( {self.transfidmeta:transID} )
if not res['OK']:
gLogger.error( "Failed to obtain metadata catalog directories", res['Message'] )
return res
transDirectories = res['Value']
directories = self._addDirs( transID, transDirectories, directories )
if not directories:
gLogger.info( "No output directories found" )
directories = sorted( directories )
return S_OK( directories )
@staticmethod
def _addDirs( transID, newDirs, existingDirs ):
for nDir in newDirs:
transStr = str( transID ).zfill( 8 )
if re.search( transStr, nDir ):
if not nDir in existingDirs:
existingDirs.append( nDir )
return existingDirs
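  # Illustrative note (added commentary, not part of the upstream agent):
  # _addDirs keeps only those candidate directories whose path contains the
  # zero-padded (8-digit) transformation ID. For example, with transID=123 the
  # pattern searched for is '00000123', so a call such as
  #   _addDirs( 123, ['/prod/00000123/LOG', '/prod/00000999/LOG'], [] )
  # would return ['/prod/00000123/LOG']. The paths here are hypothetical.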
#############################################################################
def checkTransformationIntegrity( self, transID ):
""" This method contains the real work
"""
gLogger.info( "-" * 40 )
gLogger.info( "Checking the integrity of transformation %s" % transID )
gLogger.info( "-" * 40 )
res = self.getTransformationDirectories( transID )
if not res['OK']:
return res
directories = res['Value']
if not directories:
return S_OK()
######################################################
#
# This check performs Catalog->SE for possible output directories
#
res = self.fc.exists( directories )
if not res['OK']:
gLogger.error( 'Failed to check directory existence', res['Message'] )
return res
    for directory, error in res['Value']['Failed'].items():
      gLogger.error( 'Failed to determine existence of directory', '%s %s' % ( directory, error ) )
    if res['Value']['Failed']:
      return S_ERROR( "Failed to determine the existence of directories" )
directoryExists = res['Value']['Successful']
for directory in sorted( directoryExists.keys() ):
if not directoryExists[directory]:
continue
iRes = self.integrityClient.catalogDirectoryToSE( directory )
if not iRes['OK']:
gLogger.error( iRes['Message'] )
return iRes
######################################################
#
# This check performs SE->Catalog for possible output directories
#
for storageElementName in sorted( self.activeStorages ):
res = self.integrityClient.storageDirectoryToCatalog( directories, storageElementName )
if not res['OK']:
gLogger.error( 'Failed to check integrity SE->Catalog', res['Message'] )
return res
gLogger.info( "-" * 40 )
gLogger.info( "Completed integrity check for transformation %s" % transID )
return S_OK()
def finalizeCheck( self, transID ):
""" Move to 'WaitingIntegrity' or 'ValidatedOutput'
"""
res = self.integrityClient.getTransformationProblematics( int( transID ) )
if not res['OK']:
gLogger.error( "Failed to determine whether there were associated problematic files", res['Message'] )
newStatus = ''
elif res['Value']:
gLogger.info( "%d problematic files for transformation %s were found" % ( len( res['Value'] ), transID ) )
newStatus = "WaitingIntegrity"
else:
gLogger.info( "No problematics were found for transformation %s" % transID )
newStatus = "ValidatedOutput"
if newStatus:
res = self.transClient.setTransformationParameter( transID, 'Status', newStatus )
if not res['OK']:
gLogger.error( "Failed to update status of transformation %s to %s" % ( transID, newStatus ) )
else:
gLogger.info( "Updated status of transformation %s to %s" % ( transID, newStatus ) )
gLogger.info( "-" * 40 )
return S_OK()
|
marcelovilaca/DIRAC
|
TransformationSystem/Agent/ValidateOutputDataAgent.py
|
Python
|
gpl-3.0
| 9,685
|
[
"DIRAC"
] |
c250a570e4c6538239d0099f06061d6ec9f356fde35309cf06ee16a2582e870c
|
#
# Copyright (C) 2010-2018 The ESPResSo project
# Copyright (C) 2002,2003,2004,2005,2006,2007,2008,2009,2010
# Max-Planck-Institute for Polymer Research, Theory Group
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import espressomd
from espressomd import assert_features, electrostatics
import numpy
assert_features(["ELECTROSTATICS", "LENNARD_JONES"])
print("\n--->Setup system")
# System parameters
n_part = 200
n_ionpairs = n_part / 2
density = 0.5
time_step = 0.01
temp = 1.0
gamma = 1.0
l_bjerrum = 7.0
num_steps_equilibration = 1000
num_configs = 100
integ_steps_per_config = 1000
# Particle parameters
types = {"Anion": 0, "Cation": 1}
numbers = {"Anion": n_ionpairs, "Cation": n_ionpairs}
charges = {"Anion": -1.0, "Cation": 1.0}
lj_sigmas = {"Anion": 1.0, "Cation": 1.0}
lj_epsilons = {"Anion": 1.0, "Cation": 1.0}
WCA_cut = 2.**(1. / 6.)
lj_cuts = {"Anion": WCA_cut * lj_sigmas["Anion"],
"Cation": WCA_cut * lj_sigmas["Cation"]}
# Setup System
box_l = (n_part / density)**(1. / 3.)
system = espressomd.System(box_l=[box_l] * 3)
system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
system.periodicity = [True, True, True]
system.time_step = time_step
system.cell_system.skin = 0.3
# Place particles
for i in range(int(n_ionpairs)):
system.part.add(id=len(system.part), type=types["Anion"],
pos=numpy.random.random(3) * box_l, q=charges["Anion"])
for i in range(int(n_ionpairs)):
system.part.add(id=len(system.part), type=types["Cation"],
pos=numpy.random.random(3) * box_l, q=charges["Cation"])
def combination_rule_epsilon(rule, eps1, eps2):
if rule == "Lorentz":
return (eps1 * eps2)**0.5
else:
return ValueError("No combination rule defined")
def combination_rule_sigma(rule, sig1, sig2):
if rule == "Berthelot":
return (sig1 + sig2) * 0.5
else:
return ValueError("No combination rule defined")
# Lennard-Jones interactions parameters
for s in [["Anion", "Cation"], ["Anion", "Anion"], ["Cation", "Cation"]]:
lj_sig = combination_rule_sigma(
"Berthelot", lj_sigmas[s[0]], lj_sigmas[s[1]])
lj_cut = combination_rule_sigma("Berthelot", lj_cuts[s[0]], lj_cuts[s[1]])
lj_eps = combination_rule_epsilon(
"Lorentz", lj_epsilons[s[0]], lj_epsilons[s[1]])
system.non_bonded_inter[types[s[0]], types[s[1]]].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto")
print("\n--->Lennard-Jones Equilibration")
max_sigma = max(lj_sigmas.values())
min_dist = 0.0
system.minimize_energy.init(f_max=0, gamma=10, max_steps=10,
max_displacement=max_sigma * 0.01)
while min_dist < max_sigma:
system.minimize_energy.minimize()
min_dist = system.analysis.min_dist()
# Set thermostat
system.thermostat.set_langevin(kT=temp, gamma=gamma, seed=42)
print("\n--->Tuning Electrostatics")
p3m = electrostatics.P3M(prefactor=l_bjerrum, accuracy=1e-3)
system.actors.add(p3m)
print("\n--->Temperature Equilibration")
system.time = 0.0
for i in range(int(num_steps_equilibration / 100)):
temp_measured = system.analysis.energy()['kinetic'] / ((3. / 2.) * n_part)
print("t={0:.1f}, E_total={1:.2f}, E_coulomb={2:.2f}, T_cur={3:.4f}"
.format(system.time, system.analysis.energy()['total'],
system.analysis.energy()['coulomb'], temp_measured))
system.integrator.run(100)
print("\n--->Integration")
system.time = 0.0
temp_measured = []
for i in range(num_configs):
temp_measured.append(system.analysis.energy()['kinetic']
/ ((3.0 / 2.0) * n_part))
print("t={0:.1f}, E_total={1:.2f}, E_coulomb={2:.2f}, T_cur={3:.4f}"
.format(system.time, system.analysis.energy()['total'],
system.analysis.energy()['coulomb'], temp_measured[-1]))
system.integrator.run(integ_steps_per_config)
# Internally append particle configuration
system.analysis.append()
print("\n--->Analysis")
# Calculate the averaged rdfs
rdf_bins = 100
r_min = 0.0
r_max = system.box_l[0] / 2.0
r, rdf_00 = system.analysis.rdf(rdf_type='<rdf>',
type_list_a=[types["Anion"]],
type_list_b=[types["Anion"]],
r_min=r_min,
r_max=r_max,
r_bins=rdf_bins)
r, rdf_01 = system.analysis.rdf(rdf_type='<rdf>',
type_list_a=[types["Anion"]],
type_list_b=[types["Cation"]],
r_min=r_min,
r_max=r_max,
r_bins=rdf_bins)
# Write out the data
numpy.savetxt('rdf.data', numpy.c_[r, rdf_00, rdf_01])
print("\n--->Written rdf.data")
print("\n--->Done")
|
mkuron/espresso
|
doc/tutorials/02-charged_system/scripts/nacl.py
|
Python
|
gpl-3.0
| 5,469
|
[
"ESPResSo"
] |
88b0aecdcc0a77528f2babc89b84ba4af2e817cf0d13cada7eb529698ac6e1cf
|
from wofrysrw.propagator.wavefront2D.srw_wavefront import WavefrontParameters, SRWWavefront
from wofrysrw.storage_ring.srw_light_source import SRWLightSource
from wofrysrw.storage_ring.srw_electron_beam import SRWElectronBeam
from oasys_srw.srwlib import *
'''
x = 0.0, #Transverse Coordinates of Gaussian Beam Center at Waist [m]
y = 0.0,
z = 0.0, #Longitudinal Coordinate of Waist [m]
xp = 0.0, #Average Angles of Gaussian Beam at Waist [rad]
yp = 0.0,
avgPhotEn = 12400, #5000 #Photon Energy [eV]
pulseEn = 0.001, #Energy per Pulse [J] - to be corrected
repRate = 1, #Rep. Rate [Hz] - to be corrected
polar = 1, #1 - linear horizontal
sigX = 23e-06/2.35, #Horiz. RMS size at Waist [m]
sigY = 23e-06/2.35, #Vert. RMS size at Waist [m]
sigT = 10e-15, #Pulse duration [s] (not used?)
mx = 0, #Transverse Gauss-Hermite Mode Orders
my = 0
'''
class Polarization:
LINEAR_HORIZONTAL = 1
LINEAR_VERTICAL = 2
LINEAR_45_DEGREES = 3
LINEAR_135_DEGREES = 4
CIRCULAR_RIGHT = 5
CIRCULAR_LEFT = 6
@classmethod
def tuple(cls):
return ["Linear Horizontal",
"Linear Vertical",
"Linear 45\u00b0",
"Linear 135\u00b0",
"Circular Right",
"Circular Left"]
class SRWGaussianLightSource(SRWLightSource):
def __init__(self,
name="Undefined",
beam_center_at_waist_x = 0.0, #Transverse Coordinates of Gaussian Beam Center at Waist [m]
beam_center_at_waist_y = 0.0,
beam_center_at_waist_z = 0.0, #Longitudinal Coordinate of Waist [m]
average_angle_at_waist_x = 0.0, #Average Angles of Gaussian Beam at Waist [rad]
average_angle_at_waist_y = 0.0,
photon_energy = 12400,
energy_per_pulse = 0.001, #Energy per Pulse [J]
repetition_rate = 1, #[Hz]
polarization = Polarization.LINEAR_HORIZONTAL,
horizontal_sigma_at_waist = 1e-6,
vertical_sigma_at_waist = 1e-6,
pulse_duration = 10e-15, #[s]
transverse_gauss_hermite_mode_order_x = 0,
transverse_gauss_hermite_mode_order_y = 0
):
super().__init__(name,
electron_beam=None,
magnetic_structure=None)
self.beam_center_at_waist_x = beam_center_at_waist_x
self.beam_center_at_waist_y = beam_center_at_waist_y
self.beam_center_at_waist_z = beam_center_at_waist_z
self.average_angle_at_waist_x = average_angle_at_waist_x
self.average_angle_at_waist_y = average_angle_at_waist_y
self.photon_energy = photon_energy
self.energy_per_pulse = energy_per_pulse
self.repetition_rate = repetition_rate
self.polarization = polarization
self.horizontal_sigma_at_waist = horizontal_sigma_at_waist
self.vertical_sigma_at_waist = vertical_sigma_at_waist
self.pulse_duration = pulse_duration
self.transverse_gauss_hermite_mode_order_x = transverse_gauss_hermite_mode_order_x
self.transverse_gauss_hermite_mode_order_y = transverse_gauss_hermite_mode_order_y
# from Wofry Decorator
def get_wavefront(self, wavefront_parameters):
return self.get_SRW_Wavefront(source_wavefront_parameters=wavefront_parameters).toGenericWavefront()
def get_SRW_Wavefront(self, source_wavefront_parameters = WavefrontParameters()):
self.__source_wavefront_parameters = source_wavefront_parameters
source_wavefront_parameters.photon_energy_min = self.photon_energy
source_wavefront_parameters.photon_energy_max = self.photon_energy
source_wavefront_parameters.photon_energy_points = 1
mesh = source_wavefront_parameters.to_SRWRadMesh()
GsnBm = SRWLGsnBm() #Gaussian Beam structure (just parameters)
GsnBm.x = self.beam_center_at_waist_x
GsnBm.y = self.beam_center_at_waist_y
GsnBm.z = self.beam_center_at_waist_z
GsnBm.xp = self.average_angle_at_waist_x
GsnBm.yp = self.average_angle_at_waist_y
GsnBm.avgPhotEn = self.photon_energy
GsnBm.pulseEn = self.energy_per_pulse
GsnBm.repRate = self.repetition_rate
GsnBm.polar = self.polarization
GsnBm.sigX = self.horizontal_sigma_at_waist
GsnBm.sigY = self.vertical_sigma_at_waist
GsnBm.sigT = self.pulse_duration
GsnBm.mx = self.transverse_gauss_hermite_mode_order_x
GsnBm.my = self.transverse_gauss_hermite_mode_order_y
wfr = SRWWavefront()
wfr.allocate(mesh.ne, mesh.nx, mesh.ny)
wfr.mesh = mesh
wfr.partBeam.partStatMom1.x = GsnBm.x
wfr.partBeam.partStatMom1.y = GsnBm.y
wfr.partBeam.partStatMom1.z = GsnBm.z
wfr.partBeam.partStatMom1.xp = GsnBm.xp
wfr.partBeam.partStatMom1.yp = GsnBm.yp
arPrecPar = [source_wavefront_parameters._wavefront_precision_parameters._sampling_factor_for_adjusting_nx_ny]
srwl.CalcElecFieldGaussian(wfr, GsnBm, arPrecPar)
return wfr
def get_source_wavefront_parameters(self):
return self.__source_wavefront_parameters
def to_python_code(self, data=None):
is_multi_electron = data
text_code = ""
source_wavefront_parameters = self.get_source_wavefront_parameters()
if source_wavefront_parameters is not None:
text_code += source_wavefront_parameters.to_python_code()
text_code += "\n"
text_code += "wfr = SRWLWfr()" + "\n"
text_code += "wfr.allocate(mesh.ne, mesh.nx, mesh.ny)" + "\n"
text_code += "wfr.mesh = mesh" + "\n"
text_code += "\n"
text_code += "initial_mesh = deepcopy(wfr.mesh)" + "\n"
text_code += "\n"
text_code += "GsnBm = SRWLGsnBm()" + "\n"
text_code += "GsnBm.x = " + str(self.beam_center_at_waist_x) + "\n"
text_code += "GsnBm.y = " + str(self.beam_center_at_waist_y) + "\n"
text_code += "GsnBm.z = " + str(self.beam_center_at_waist_z) + "\n"
text_code += "GsnBm.xp = " + str(self.average_angle_at_waist_x) + "\n"
text_code += "GsnBm.yp = " + str(self.average_angle_at_waist_y) + "\n"
text_code += "GsnBm.avgPhotEn = " + str(self.photon_energy) + "\n"
text_code += "GsnBm.pulseEn = " + str(self.energy_per_pulse) + "\n"
text_code += "GsnBm.repRate = " + str(self.repetition_rate) + "\n"
text_code += "GsnBm.polar = " + str(self.polarization) + "\n"
text_code += "GsnBm.sigX = " + str(self.horizontal_sigma_at_waist) + "\n"
text_code += "GsnBm.sigY = " + str(self.vertical_sigma_at_waist) + "\n"
text_code += "GsnBm.sigT = " + str(self.pulse_duration) + "\n"
text_code += "GsnBm.mx = " + str(self.transverse_gauss_hermite_mode_order_x) + "\n"
text_code += "GsnBm.my = " + str(self.transverse_gauss_hermite_mode_order_y) + "\n"
text_code += "\n"
text_code += "wfr.partBeam.partStatMom1.x = GsnBm.x" + "\n"
text_code += "wfr.partBeam.partStatMom1.y = GsnBm.y" + "\n"
text_code += "wfr.partBeam.partStatMom1.z = GsnBm.z" + "\n"
text_code += "wfr.partBeam.partStatMom1.xp = GsnBm.xp" + "\n"
text_code += "wfr.partBeam.partStatMom1.yp = GsnBm.yp" + "\n"
text_code += "\n"
if not is_multi_electron:
text_code += "srwl.CalcElecFieldGaussian(wfr, GsnBm, [" + str(source_wavefront_parameters._wavefront_precision_parameters._sampling_factor_for_adjusting_nx_ny) + "])" + "\n"
return text_code
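# Editor's note: the block below is an illustrative sketch, not part of the
# original module. It assumes SRW is installed and that the default
# WavefrontParameters() mesh is acceptable; the numeric values are hypothetical.
if __name__ == "__main__":
    demo_source = SRWGaussianLightSource(name="GaussianDemo",
                                         photon_energy=8000.0,
                                         horizontal_sigma_at_waist=1e-6,
                                         vertical_sigma_at_waist=1e-6)
    # SRW-native wavefront first, then the generic (Wofry) wavefront built from it
    srw_wavefront = demo_source.get_SRW_Wavefront(WavefrontParameters())
    generic_wavefront = demo_source.get_wavefront(WavefrontParameters())
    print(demo_source.to_python_code())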
|
lucarebuffi/wofrysrw
|
wofrysrw/storage_ring/light_sources/srw_gaussian_light_source.py
|
Python
|
mit
| 8,130
|
[
"Gaussian"
] |
194e20e72d9d335e33da48f7ba05ea4bdd37376fa6739719ef8fe76c06b9bd8f
|
"""Functions for performing the individual processing steps.
- "nfo": Prepare meta-information for each trajectory
- "cnv1": Run trjconv for pbc imaging (gromacs runs only)
- "cnv2": Convert files to netcdf
"""
import subprocess
import os
import glob
import re
import json
import logging
from operator import itemgetter
from datetime import datetime
from mdtraj.formats import XTCTrajectoryFile, NetCDFTrajectoryFile
import mdtraj.utils
import numpy as np
log = logging.getLogger(__name__)
class config:
prefix = "processed.v2"
def _nfo(info, *, rncln_re, gen_glob, gen_re, gen=None, clone=None):
if 'run' not in info['meta']:
# Get metadata
rncln_ma = rncln_re.search(info['raw']['indir'])
info['meta']['run'] = int(rncln_ma.group(1))
info['meta']['clone'] = (int(rncln_ma.group(2))
if clone is None else clone)
log.debug("Got metadata {meta[project]}-{meta[run]}-{meta[clone]}"
.format(**info))
if 'path' not in info:
path = {'workdir': "{prefix}/{project}/{run}/{clone}"
.format(prefix=config.prefix, **info['meta'])}
path['info'] = "{workdir}/info.json".format(**path)
info['path'] = path
os.makedirs(info['path']['workdir'], exist_ok=True)
log.debug("Make workdir: {path[workdir]}".format(**info))
# Get gens
raw = info['raw']
raw['gen_glob'] = gen_glob
raw['date'] = datetime.now().isoformat()
raw['gens'] = [] # re-do each time
gens = sorted(
((int(gen_re.search(gen_fn).group(1) if gen is None else gen), gen_fn)
for gen_fn in glob.glob("{indir}/{gen_glob}".format(**raw))),
key=itemgetter(0)
)
# Make sure they're contiguous
prev_gen = -1
for gen, gen_fn in gens:
raw['gens'] += [gen_fn]
if gen != prev_gen + 1:
log.error("Found discontinous gens "
"in {meta[project]}-{meta[run]}-{meta[clone]}. "
"It went from {i1} to {i2}."
.format(i1=prev_gen, i2=gen, **info))
raw['success'] = False
info['raw'] = raw
return info
prev_gen = gen
if 'exclude' in raw:
log.warning("Excluding {meta[project]}-{meta[run]}-{meta[clone]}. "
"Reason: {raw[exclude]}".format(**info))
raw['gens'] = []
raw['success'] = False
else:
raw['success'] = True
info['raw'] = raw
# Get structure (topology) data
if 'top' not in info:
struct_fn = "structs-{meta[project]}.json".format(**info)
try:
with open(struct_fn) as f:
stru = json.load(f)
string_key = str(info['meta']['run']) # ugh
info['top'] = stru[string_key]
except Exception as e:
log.warning("No structure information. {}".format(e))
log.info("NFO: {project} run {run} clone {clone}".format(**info['meta']))
return info
def nfo(info, projcode):
rncln_res = {
'xa4': re.compile(r"RUN(\d+)/CLONE(\d+)/"),
'x21': re.compile(r"RUN(\d+)/CLONE(\d+)/"),
'bw': re.compile(r"run-(\d+)/"),
}
gen_globs = {
'xa4': "frame*.xtc",
'x21': "results-???/positions.xtc",
'bw': "traj_comp.xtc",
}
gen_res = {
'xa4': re.compile(r"frame(\d+).xtc"),
'x21': re.compile(r"results-(\d+)/positions.xtc"),
'bw': re.compile(r""),
}
gen = None
clone = None
if projcode == 'bw':
gen = 0
clone = 0
return _nfo(
info,
rncln_re=rncln_res[projcode],
gen_glob=gen_globs[projcode],
gen_re=gen_res[projcode],
gen=gen,
clone=clone,
)
def _run_trjconv(info, gen, gen_fn):
out_fn = "{outdir}/{gen}.{outext}".format(gen=gen, **info['cnv1'])
log.debug("Running trjconv {} {}".format(gen_fn, out_fn))
with open(info['cnv1']['log'], 'a') as logf:
popen = subprocess.Popen([
'gmx', 'trjconv',
'-f', gen_fn,
'-o', out_fn,
'-s', info['cnv1']['topology'],
'-pbc', 'mol',
'-center',
'-skip', str(info['cnv1']['stride']),
],
stdin=subprocess.PIPE,
stdout=logf,
stderr=subprocess.STDOUT
)
# Center based on 1 - Protein
# Output 0 - System
popen.communicate(b"1\n0")
popen.wait()
if popen.returncode != 0:
raise RuntimeError("Non-zero exit code from trjconv {}"
.format(popen.returncode))
return out_fn
def _cnv1(info, *, stride, topology, skip=False):
info['cnv1'] = {
'date': datetime.now().isoformat(),
'stride': stride,
'topology': topology,
'skip': skip,
'log': "{workdir}/cnv1.log".format(**info['path']),
'outdir': "{workdir}/cnv1".format(**info['path']),
'outext': 'xtc',
'gens': [] if 'cnv1' not in info else info['cnv1']['gens'],
}
if not info['raw']['success']:
info['cnv1']['success'] = False
return info
if skip:
info['cnv1']['success'] = True
return info
os.makedirs(info['cnv1']['outdir'], exist_ok=True)
done = len(info['cnv1']['gens'])
log.info("CNV1: {meta[project]}-{meta[run]}-{meta[clone]}. "
"Done {done}, doing {todo}"
.format(done=done, todo=len(info['raw']['gens']) - done, **info))
for gen, gen_fn in enumerate(info['raw']['gens']):
if gen < done:
continue
out_fn = _run_trjconv(info, gen, gen_fn)
info['cnv1']['gens'] += [out_fn]
info['cnv1']['success'] = True
return info
def _nc_a_chunk(xtc, nc, has_overlapping_frames):
xyz, time, step, box = xtc.read()
assert box.ndim == 3, box.ndim
al, bl, cl, alpha, beta, gamma = \
mdtraj.utils.box_vectors_to_lengths_and_angles(
box[:, 0, :], box[:, 1, :], box[:, 2, :]
)
xyz = xyz * 10
blengs = np.asarray([al, bl, cl]).T * 10
bangles = np.asarray([alpha, beta, gamma]).T
sl = slice(0, -1 if has_overlapping_frames else None, 1)
nc.write(
xyz[sl, ...],
time[sl, ...],
blengs[sl, ...],
bangles[sl, ...],
)
def _nc_a_traj(info, gen, gen_fn, has_overlapping_frames):
out_fn = "{outdir}/{gen}.{outext}".format(gen=gen, **info['cnv2'])
log.debug("Converting to netcdf {} {}".format(gen_fn, out_fn))
with XTCTrajectoryFile(gen_fn, 'r') as xtc:
with NetCDFTrajectoryFile(out_fn, 'w') as nc:
_nc_a_chunk(xtc, nc, has_overlapping_frames)
return out_fn
def _cnv2(info, *, has_overlapping_frames, chunk=100):
info['cnv2'] = {
'date': datetime.now().isoformat(),
'chunk': chunk,
'had_overlapping_frames': has_overlapping_frames,
'log': "{workdir}/cnv2.log".format(**info['path']),
'outdir': "{workdir}/cnv2".format(**info['path']),
'outext': 'nc',
'gens': [] if 'cnv2' not in info else info['cnv2']['gens'],
}
if not info['cnv1']['success']:
info['cnv2']['success'] = False
return info
if info['cnv1']['skip']:
prev_gens = info['raw']['gens']
else:
prev_gens = info['cnv1']['gens']
done = len(info['cnv2']['gens'])
log.info("CNV2: {meta[project]}-{meta[run]}-{meta[clone]}. "
"Converting to nc. Done {done}, todo {todo}"
.format(done=done, todo=len(prev_gens) - done, **info))
os.makedirs(info['cnv2']['outdir'], exist_ok=True)
for gen, gen_fn in enumerate(prev_gens):
if gen < done:
continue
out_fn = _nc_a_traj(info, gen, gen_fn, has_overlapping_frames)
info['cnv2']['gens'] += [out_fn]
info['cnv2']['success'] = True
return info
def cnv1(info, projcode):
if info['meta']['project'] == 'p9752':
stride = 4
elif info['meta']['project'] == 'p9761':
stride = 8
else:
stride = 1
topology = None
skip = False
if projcode == 'xa4':
topology = "{raw[indir]}/frame0.tpr".format(**info)
elif projcode == 'bw':
topology = "{raw[indir]}/topol.tpr".format(**info)
else:
skip = True
return _cnv1(
info,
stride=stride,
topology=topology,
skip=skip,
)
def cnv2(info, projcode):
if projcode == 'xa4':
overlap = True
else:
overlap = False
return _cnv2(
info,
has_overlapping_frames=overlap
)
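# Editor's note: illustrative sketch, not part of the original module. It shows
# how the three steps from the module docstring (nfo -> cnv1 -> cnv2) might be
# chained for a single trajectory; the 'info' layout and the raw directory are
# hypothetical, inferred from the fields the functions above access. Running it
# creates work directories under processed.v2/ as a side effect.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    demo_info = {
        'meta': {'project': 'p9752'},
        'raw': {'indir': 'raw/p9752/RUN0/CLONE0/'},
    }
    demo_info = nfo(demo_info, projcode='xa4')
    demo_info = cnv1(demo_info, projcode='xa4')
    demo_info = cnv2(demo_info, projcode='xa4')
    print(json.dumps(demo_info, indent=2, default=str))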
|
mpharrigan/trajprocess
|
trajprocess/process.py
|
Python
|
mit
| 8,629
|
[
"Gromacs",
"MDTraj",
"NetCDF"
] |
bf2aa040e70cc1617ab687da10444b39037a773b4817635368254cac344d130c
|
"""
================================================
Segmenting the picture of greek coins in regions
================================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
from scipy.ndimage import gaussian_filter
import matplotlib.pyplot as plt
import skimage
from skimage.data import coins
from skimage.transform import rescale
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
from sklearn.utils.fixes import parse_version
# these were introduced in skimage-0.14
if parse_version(skimage.__version__) >= parse_version('0.14'):
rescale_params = {'anti_aliasing': False, 'multichannel': False}
else:
rescale_params = {}
# load the coins as a numpy array
orig_coins = coins()
# Resize it to 20% of the original size to speed up the processing
# Applying a Gaussian filter for smoothing prior to down-scaling
# reduces aliasing artifacts.
smoothened_coins = gaussian_filter(orig_coins, sigma=2)
rescaled_coins = rescale(smoothened_coins, 0.2, mode="reflect",
**rescale_params)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(rescaled_coins)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 10
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 25
# %%
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels, random_state=42)
t1 = time.time()
labels = labels.reshape(rescaled_coins.shape)
plt.figure(figsize=(5, 5))
plt.imshow(rescaled_coins, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l,
colors=[plt.cm.nipy_spectral(l / float(N_REGIONS))])
plt.xticks(())
plt.yticks(())
title = 'Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))
print(title)
plt.title(title)
plt.show()
|
glemaitre/scikit-learn
|
examples/cluster/plot_coin_segmentation.py
|
Python
|
bsd-3-clause
| 2,948
|
[
"Brian",
"Gaussian"
] |
01b7cb5460ca35cccad0e25f02fafacc5007407aed5c136337423cd509f34a41
|
'''
High level interface to Orbkit reading functions
'''
import re
from .native import read_native
from orbkit.read.molden import read_molden
from orbkit.read.gamess import read_gamess
from orbkit.read.gaussian_fchk import read_gaussian_fchk
from orbkit.read.gaussian_log import read_gaussian_log
from orbkit.read.aomix import read_aomix
from orbkit.read.wfx import read_wfx
from orbkit.read.wfn import read_wfn
from orbkit.read.native import read_native
from orbkit.read.cclib_parser import read_with_cclib
from orbkit.display import display
from orbkit.analytical_integrals import check_mo_norm
from .tools import find_itype, descriptor_from_file
readers = {'molden': read_molden,
'aomix': read_aomix,
'gamess': read_gamess,
'gaussian.log': read_gaussian_log, 'gaussian_log': read_gaussian_log,
'gaussian.fchk': read_gaussian_fchk, 'fchk': read_gaussian_fchk,
'wfn': read_wfn,
'wfx': read_wfx,
'cclib': read_with_cclib,
'native': read_native
} #: Specifies possible input types.
def main_read(fname, all_mo=False, spin=None, itype='auto', check_norm=False, **kwargs):
'''
This is the high-level interface for the
orbkit reading routines.
**Parameters:**
fname: str, file descriptor
Specifies the filename for the input file.
fname can also be a file descriptor instead of a filename.
all_mo : bool, optional
If True, all molecular orbitals are returned.
spin : {None, 'alpha', or 'beta'}, optional
If not None, returns exclusively 'alpha' or 'beta' molecular orbitals.
itype : str, optional
Can be used to manually specify the input filetype.
check_norm : bool, optional
If True, ORBKIT verifies that molecular orbitals are orthonormal.
**Note:**
All additional keyword arguments are forwarded to the reading functions.
**Returns:**
qc (class QCinfo) with attributes geo_spec, geo_info, ao_spec, mo_spec, etot :
See :ref:`Central Variables` for details.
'''
if isinstance(fname, str):
filename = fname
else:
filename = fname.name
if itype == 'auto':
itype = find_itype(fname)
display('Loading data from {0} type file {1}\n'.format(itype, filename))
qc = readers[itype](fname, all_mo=all_mo, spin=spin, **kwargs)
if check_norm:
deviation = check_mo_norm(qc)
if deviation >= 1e-5:
raise ValueError('Bad molecular orbital norm: {0:.4e}'.format(deviation))
return qc
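# Editor's note: illustrative sketch, not part of the original module. The
# filename is hypothetical; itype='auto' lets find_itype() inspect the file.
# Because this module uses package-relative imports, run it as
# `python -m orbkit.read.high_level` rather than as a bare script.
if __name__ == "__main__":
    qc = main_read('water.molden', all_mo=True, check_norm=False)
    print(qc)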
|
orbkit/orbkit
|
orbkit/read/high_level.py
|
Python
|
lgpl-3.0
| 2,531
|
[
"GAMESS",
"Gaussian",
"cclib"
] |
3a1d16bbd9f511e646287891cf55d12f607b1878b6fee98d9472e6f542d44c7e
|
#!/usr/bin/python
#
# Copyright 2015 John Kendrick
#
# This file is part of PDielec
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# You should have received a copy of the MIT License
# along with this program, if not see https://opensource.org/licenses/MIT
#
"""Read the contents of a directory containing Abinit input and output files
Inherit the following from the GenericOutputReader
__init__
print_info
_read_output_file
"""
import re
import math
import numpy as np
from PDielec.Constants import amu, angs2bohr, atomic_number_to_element, hartree2ev
from PDielec.UnitCell import UnitCell
from PDielec.GenericOutputReader import GenericOutputReader
class AbinitOutputReader(GenericOutputReader):
"""Read the contents of a directory containing Abinit input and output files
Inherit the following from the GenericOutputReader
__init__
print_info
_read_output_file
"""
def __init__(self, filenames):
GenericOutputReader.__init__(self, filenames)
self.type = 'Abinit output files'
self._acell = None
self._charges = None
return
def _read_output_files(self):
"""Read the Abinit file names"""
# Define the search keys to be looked for in the files
self.manage = {} # Empty the dictionary matching phrases
self.manage['dynamical'] = (re.compile(' Dynamical matrix,'), self._read_dynamical)
self.manage['bornCharges'] = (re.compile(' Effective charges,'), self._read_born_charges)
self.manage['epsilon'] = (re.compile(' Dielectric tensor,'), self._read_epsilon)
self.manage['masses'] = (re.compile(' amu '), self._read_masses)
self.manage['nions'] = (re.compile(' natom '), self._read_natom)
self.manage['lattice'] = (re.compile(' rprim '), self._read_lattice_vectors)
self.manage['xred'] = (re.compile(' xred '), self._read_xred)
self.manage['typat'] = (re.compile(' typat '), self._read_typat)
self.manage['ntypat'] = (re.compile(' ntypat '), self._read_ntypat)
self.manage['acell'] = (re.compile(' acell '), self._read_acell)
self.manage['nkpt'] = (re.compile(' nkpt '), self._read_kpoints)
self.manage['band'] = (re.compile(' nband '), self._read_band)
self.manage['band1'] = (re.compile(' nband1 '), self._read_band)
self.manage['occupancy'] = (re.compile(' occ '), self._read_occupancy)
self.manage['occupancy1'] = (re.compile(' occ1 '), self._read_occupancy)
self.manage['ecut'] = (re.compile('^ *ecut '), self._read_energy_cutoff)
self.manage['kptrlatt'] = (re.compile(' kptrlatt '), self._read_kpoint_grid)
self.manage['electrons'] = (re.compile(' fully or partial'), self._read_electrons)
self.manage['pressure'] = (re.compile('-Cartesian.*GPa'), self._read_pressure)
self.manage['znucl'] = (re.compile('^ *znucl '), self._read_znucl)
self.manage['totalenergy'] = (re.compile('^ *Total energy '), self._read_total_energy)
for f in self._outputfiles:
self._read_output_file(f)
return
def _read_total_energy(self, line):
self.final_energy_without_entropy = float(line.split()[4]) * hartree2ev
self.final_free_energy = float(line.split()[4]) * hartree2ev
return
def _read_znucl(self, line):
self.species = []
for z in line.split()[1:]:
iz = int(float(z)+0.001)
self.species.append(atomic_number_to_element[iz].capitalize())
self.nspecies = len(self.species)
return
def _read_band(self, line):
self.nbands = int(line.split()[1])
return
def _read_occupancy(self, line):
occs = []
occupancies = line.split()[1:]
while len(occs) < self.nbands:
occs+= [ float(f) for f in occupancies ]
occupancies = self.file_descriptor.readline().split()
self.electrons = int(sum(occs) + 0.0001)  # use the built-in sum rather than shadowing it
return
def _read_pressure(self, line):
self.pressure = float(line.split()[7])
return
def _read_electrons(self, line):
self.electrons = float(line.split()[6])
return
def _read_kpoint_grid(self, line):
self.kpoint_grid = [ int(line.split()[1]), int(line.split()[5]), int(line.split()[9]) ]
return
def _read_kpoints(self, line):
self.kpoints = int(line.split()[1])
return
def _read_energy_cutoff(self, line):
self.energy_cutoff = hartree2ev * float(line.split()[1])
return
def _read_acell(self, line):
self._acell = [float(f)/angs2bohr for f in line.split()[1:4]]
return
def _read_ntypat(self, line):
self.nspecies = int(line.split()[1])
return
def _read_typat(self, line):
# typat occurs last in the list of data items we need from the output file
self.atom_type_list = [int(i)-1 for i in line.split()[1:]]
self.masses = [None for i in range(self.nions)]
self.ions_per_type = [0 for i in range(self.nspecies)]
for i, a in enumerate(self.atom_type_list):
self.ions_per_type[a] += 1  # atom_type_list entries are already zero-based
self.masses[i] = self.masses_per_type[a]
if self.species:
species_list = [ self.species[i] for i in self.atom_type_list ]
self.unit_cells[-1].set_element_names(species_list)
return
def _read_epsilon(self, line):
for i in range(3):
linea = self.file_descriptor.readline().split()
nlines = 9
for i in range(nlines):
linea = self.file_descriptor.readline().split()
if not linea:
linea = self.file_descriptor.readline().split()
j = int(linea[0])
k = int(linea[2])
self.zerof_optical_dielectric[j-1][k-1] = float(linea[4])
return
def _read_natom(self, line):
self.nions = int(line.split()[1])
# We can only create this once we know the number of ions
self._charges = np.zeros((self.nions, 3, 3))
return
def _read_masses(self, line):
self.masses_per_type = [float(f) for f in line.split()[1:]]
return
def _read_dynamical(self, line):
# Read the dynamical matrix
nmodes = self.nions*3
hessian = np.zeros((nmodes, nmodes))
for i in range(4):
self.file_descriptor.readline()
nlines = nmodes*nmodes
for i in range(nlines):
linea = self.file_descriptor.readline().split()
if not linea:
linea = self.file_descriptor.readline().split()
diri = int(linea[0])
atomi = int(linea[1])
dirj = int(linea[2])
atomj = int(linea[3])
ipos = (atomi - 1)*3 + diri - 1
jpos = (atomj - 1)*3 + dirj - 1
# store the massweighted matrix
hessian[ipos][jpos] = float(linea[4])/(amu*math.sqrt(self.masses[atomi-1]*self.masses[atomj-1]))
# symmetrise, project diagonalise and store frequencies and normal modes
self._dynamical_matrix(hessian)
return
def _read_born_charges(self, line):
"""Read the born charges from the outputfile file.
Each row of the output refers to a given field direction
Each column in the row refers the atomic displacement
so the output is arranged [[a1x a1y a1z]
[a2x a2y a2z]
[a3x a3y a3z]]
where 1,2,3 are the field directions and x, y, z are the atomic displacements"""
for i in range(5):
self.file_descriptor.readline()
# The charges are calculated in two ways, we take the mean of the phonon and the field
nlines = 9*self.nions
for i in range(nlines):
linea = self.file_descriptor.readline().split()
if not linea:
linea = self.file_descriptor.readline().split()
if int(linea[3]) > self.nions:
ifield = int(linea[2])
ixyz = int(linea[0])
iatom = int(linea[1])
else:
ifield = int(linea[0])
ixyz = int(linea[2])
iatom = int(linea[3])
self._charges[iatom-1][ifield-1][ixyz-1] += 0.5*float(linea[4])
# Convert the charges
self.born_charges = []
for i in range(self.nions):
atom = []
for ifield in range(3):
b = self._charges[i][ifield][:].tolist()
atom.append(b)
self.born_charges.append(atom)
return
def _read_xred(self, line):
linea = line.split()[1:]
fractional = []
fractional.append( [ float(xyz) for xyz in linea ] )
for i in range(self.nions-1):
linea = self.file_descriptor.readline().split()
fractional.append( [ float(xyz) for xyz in linea ] )
# end for i
self.unit_cells[-1].set_fractional_coordinates(fractional)
if self.species:
species_list = [ self.species[i] for i in self.atom_type_list ]
self.unit_cells[-1].set_element_names(species_list)
# end def
def _read_lattice_vectors(self, line):
linea = line.split()
avector = [float(linea[1]), float(linea[2]), float(linea[3])]
linea = self.file_descriptor.readline().split()
bvector = [float(linea[0]), float(linea[1]), float(linea[2])]
linea = self.file_descriptor.readline().split()
cvector = [float(linea[0]), float(linea[1]), float(linea[2])]
avector = [f * self._acell[0] for f in avector]
bvector = [f * self._acell[1] for f in bvector]
cvector = [f * self._acell[2] for f in cvector]
self.unit_cells.append(UnitCell(avector, bvector, cvector))
self.ncells = len(self.unit_cells)
self.volume = self.unit_cells[-1].volume
return
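# Editor's note: illustrative sketch, not part of the original module. The
# filename is hypothetical, and in normal use the public reading entry point is
# provided by GenericOutputReader; the reader's own _read_output_files() is
# called directly here only for illustration.
if __name__ == "__main__":
    reader = AbinitOutputReader(["abinit_run.out"])
    reader._read_output_files()
    print("Number of ions:", reader.nions)
    print("Zero-frequency optical dielectric tensor:", reader.zerof_optical_dielectric)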
|
JohnKendrick/PDielec
|
PDielec/AbinitOutputReader.py
|
Python
|
mit
| 10,508
|
[
"ABINIT"
] |
a98139b48a95fd660bd9b28087bca87ac6dea5f5a124461cacd7194dda2b1fc8
|
import ocl as cam
import camvtk
import time
import vtk
import datetime
if __name__ == "__main__":
myscreen = camvtk.VTKScreen()
myscreen.setAmbient(20,20,20)
#stl = camvtk.STLSurf(filename="demo.stl")
stl = camvtk.STLSurf(filename="demo2.stl")
print "STL surface read"
myscreen.addActor(stl)
stl.SetWireframe()
stl.SetColor((0.5,0.5,0.5))
#stl.SetFlat()
polydata = stl.src.GetOutput()
s= cam.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print "STLSurf with ", s.size(), " triangles"
cutterDiameter=0.6
cutter = cam.CylCutter(cutterDiameter)
#print cutter.str()
#print cc.type
minx=-20
dx=1
maxx=20
miny=-20
dy=1
maxy=20
z=-0.2
bucketSize = 20
#pftp = cam.ParallelFinish()
#pftp.initCLPoints(minx,dx,maxx,miny,dy,maxy,z)
#pftp.initSTLSurf(s, bucketSize)
#pftp.dropCutterSTL1(cutter)
#print " made ", pftp.dcCalls, " drop-cutter calls"
#exit
pf2 = cam.ParallelFinish()
pf2.initCLPoints(minx,dx,maxx,miny,dy,maxy,z)
pf2.initSTLSurf(s, bucketSize)
pf2.dropCutterSTL2(cutter)
print " made ", pf2.dcCalls, " drop-cutter calls"
#clpoints = pftp.getCLPoints()
#ccpoints = pftp.getCCPoints()
clpoints = pf2.getCLPoints()
ccpoints = pf2.getCCPoints()
#CLPointGrid(minx,dx,maxx,miny,dy,maxy,z)
nv=0
nn=0
ne=0
nf=0
myscreen.camera.SetPosition(3, 100, 15)
myscreen.camera.SetFocalPoint(50, 50, 0)
t = camvtk.Text()
t.SetPos( (myscreen.width-200, myscreen.height-30) )
myscreen.addActor( t)
t2 = camvtk.Text()
t2.SetPos( (myscreen.width-200, 30) )
myscreen.addActor( t2)
t3 = camvtk.Text()
t3.SetPos( (30, 30))
myscreen.addActor( t3)
t4 = camvtk.Text()
t4.SetPos( (30, myscreen.height-60))
myscreen.addActor( t4)
n=0
precl = cam.Point()
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
w2if.Modified()
lwr.SetFileName("tux1.png")
for cl,cc in zip(clpoints,ccpoints):
camEye = myscreen.camera.GetFocalPoint()
camPos = myscreen.camera.GetPosition()
postext = "(%3.3f, %3.3f, %3.3f)" % (camPos[0], camPos[1], camPos[2])
eyetext = "(%3.3f, %3.3f, %3.3f)" % (camEye[0], camEye[1], camEye[2])
camtext = "Camera LookAt: "+eyetext+"\nCamera Pos: "+ postext
t4.SetText(camtext)
t.SetText(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
xtext = "%3.3f" % cl.x
ytext = "%3.3f" % cl.y
ztext = "%3.3f" % cl.z
t2.SetText( "X: " + xtext + "\nY: " + ytext + "\nZ: " + ztext )
if cc.type==cam.CCType.FACET:
nf+=1
col = (0,1,1)
elif cc.type == cam.CCType.VERTEX:
nv+=1
col = (0,1,0)
elif cc.type == cam.CCType.EDGE:
ne+=1
col = (1,0,0)
elif cc.type == cam.CCType.NONE:
#print "type=NONE!"
nn+=1
col = (1,1,1)
#if cl.isInside(t):
# col = (0, 1, 0)
#else:
# col = (1, 0, 0)
trilist = pf2.getTrianglesUnderCutter(cl, cutter)
#print "at cl=", cl.str() , " where len(trilist)=", len(trilist)
t3.SetText("Total Triangles: "+ str(s.size()) +"\nUnder Cutter (red): "+str(len(trilist)))
stl2 = camvtk.STLSurf(filename=None, triangleList=trilist, color=(1,0,0)) # a new surface with only triangles under cutter
stl2.SetWireframe()
#stl2.SetFlat()
myscreen.addActor(stl2)
trilist=[]
cutactor = camvtk.Cylinder(center=(cl.x,cl.y,cl.z), radius=cutterDiameter/2, height=2, color=(0.7,1,1))
myscreen.addActor( cutactor )
#myscreen.addActor( camvtk.Point(center=(cl.x,cl.y,cl.z) , color=col) )
if n==0:
precl = cl
else:
d = cl-precl
if (d.norm() < 9):
myscreen.addActor( camvtk.Line( p1=(precl.x, precl.y, precl.z), p2=(cl.x, cl.y, cl.z), color=(0,1,1) ) )
precl = cl
n=n+1
#myscreen.addActor( camvtk.Point(center=(cl2.x,cl2.y,cl2.z+0.2) , color=(0.6,0.2,0.9)) )
#myscreen.addActor( camvtk.Point(center=(cc.x,cc.y,cc.z), color=col) )
#print cc.type
myscreen.camera.Azimuth( 0.2 )
#time.sleep(0.01)
myscreen.render()
w2if.Modified()
lwr.SetFileName("kdmov"+ ('%05d' % n)+".png")
#lwr.Write()
#raw_input("Press Enter to continue")
myscreen.removeActor(stl2)
myscreen.removeActor( cutactor )
print "none=",nn," vertex=",nv, " edge=",ne, " facet=",nf, " sum=", nn+nv+ne+nf
print len(clpoints), " cl points evaluated"
#lwr.Write()
for n in range(1,36):
t.SetText(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
myscreen.camera.Azimuth( 1 )
time.sleep(0.01)
myscreen.render()
lwr.SetFileName("kd_frame"+ ('%03d' % n)+".png")
w2if.Modified()
#lwr.Write()
myscreen.iren.Start()
raw_input("Press Enter to terminate")
|
AlanZatarain/opencamlib
|
scripts/kdtree_movie1.py
|
Python
|
gpl-3.0
| 5,347
|
[
"VTK"
] |
5a150bdd4e598d60eed8fd5e2d2bea80b18b0ef666595d9afaf89f7ccd621ab0
|
from __future__ import print_function
import os
from .abinittask import AbinitTask
__all__ = ['AbinitScfTask']
class AbinitScfTask(AbinitTask):
"""Charge density calculation."""
_TASK_NAME = 'SCF task'
_input_fname = 'scf.in'
_output_fname = 'scf.out'
def __init__(self, dirname, **kwargs):
"""
Arguments
---------
dirname : str
Directory in which the files are written and the code is executed.
Will be created if needed.
Keyword arguments
-----------------
ecut : float
Kinetic energy cut-off, in Hartree.
tolvrs : float (1e-10)
Tolerance on residual potential used as a convergence criterion
for the SCF cycle.
prefix : str
Prefix used as a rootname for abinit calculations.
structure : pymatgen.Structure
Structure object containing information on the unit cell.
ngkpt : list(3), int, optional
K-points grid. Number of k-points along each primitive vector
of the reciprocal lattice.
kshift : list(3), float, optional
Relative shift of the k-points grid along each direction,
as a fraction of the smallest division along that direction.
qshift : list(3), float, optional
Absolute shift of the k-points grid along each direction.
nspinor : number of spinorial components, int, optional
Default 1
input_variables : dict
Any other input variables for the Abinit input file.
See also:
Properties
----------
charge_density_fname : str
The charge density file produced by Abinit.
vxc_fname : str
The xc potential file produced by Abinit.
"""
kwargs.setdefault('prefix', 'scf')
super(AbinitScfTask, self).__init__(dirname, **kwargs)
self.input.set_variables(self.get_scf_variables(**kwargs))
@staticmethod
def get_scf_variables(**kwargs):
"""Return a dict of variables required for an SCF calculation."""
variables = dict(
prtden = 1,
prtwf = 0,
tolvrs = kwargs.get('tolvrs', 1e-10),
ecut = kwargs.get('ecut'),
nspinor = kwargs.pop('nspinor',1),
)
return variables
@property
def charge_density_fname(self):
#return os.path.join(self.dirname, self.get_odat('DEN'))
return self.get_odat('DEN')
rho_fname = charge_density_fname
@property
def xc_potential_fname(self):
#return os.path.join(self.dirname, self.get_odat('DEN'))
return self.get_odat('VXC')
vxc_fname = xc_potential_fname
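# Editor's note: illustrative sketch, not part of the original module. The
# static helper can be inspected on its own to see which Abinit variables an
# SCF task sets; the values below are hypothetical. Because this module uses
# package-relative imports, run it with `python -m`.
if __name__ == "__main__":
    print(AbinitScfTask.get_scf_variables(ecut=20.0, tolvrs=1e-10, nspinor=1))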
|
trangel/OPTpy
|
OPTpy/Abinit/scftask.py
|
Python
|
gpl-3.0
| 2,767
|
[
"ABINIT",
"pymatgen"
] |
76b546fc1c69709d0c5f48fcf2aae003153a4a5daa3f0c6237aca9c040b8297c
|
from __future__ import print_function
"""
Module for read saint raw files from bruker integration
"""
import numpy
from ImageD11 import columnfile
docsheader = """
...from the file sairefl._tp...
Copyright Bruker
ITEM FORMAT DESCRIPTION
---- ------ -----------
COMMENT '!' An exclamation mark in the first column
indicates that the line contains a
comment. (If the line is not a
comment, the record starts in the first
column, not the second; that is, the
first column is not "reserved" for a
possible exclamation mark)
"""
docs = """
IHKL 3I4 HKL indices of the reflection
#IMNP 3I4 PRESENT ONLY FOR MODULATED-STRUCTURE DATA
MNP indices of the reflection. Unused
indices (in 4- or 5-dimensional cases)
are written as zero
FI F8.x Integrated intensity in photons, background-
subtracted, normalized to 1 min/deg, and
Lp corrected. Number of places to right
of the decimal point is adjusted to
balance precision vs. overflow; exponential
format is used for very large values.
SI F8.x Estimated standard deviation of the
intensity. Number of places to right
of the decimal point is adjusted to
balance precision vs. overflow; exponential
format is used for very large values.
IBATNO I4 Batch number (for scaling) assigned to
the reflection
COSINES 6F8.5 Direction cosines of the incident and
diffracted beams, relative to the
unit cell axes. Order is XI,XD,YI,YD,
ZI,ZD where XI is the X-component of
the incident beam, XD is the X-component
of the diffracted beam, etc. Used
for absorption correction.
MSTATUS I3 1X,Z2 Status mask reserved for flagging
abnormal conditions. There are
currently none defined.
XO F7.2 Observed X-pixel coordinate of the
intensity-weighted reflection centroid,
in reduced pixels (scale of 512x512)
YO F7.2 Observed Y-pixel coordinate of the
intensity-weighted reflection centroid,
in reduced pixels (scale of 512x512)
ZO F8.2 Observed frame number of the
intensity-weighted reflection centroid
XP F7.2 Predicted X-pixel of the reflection
in reduced pixels (scale of 512x512)
YP F7.2 Predicted Y-pixel of the reflection
in reduced pixels (scale of 512x512)
ZP F8.2 Predicted frame number of the reflection
CORLPAF F6.3 Multiplicative correction for Lorentz
effect, polarization, air absorption,
and detector faceplate absorption,
already applied to integrated intensity,
FI
CORREL F5.2 The correlation coefficient between the
3-D profile observed for this reflection
and the corresponding model 3-D profile
ACCUMTIME F7.2 Accumulated hours of exposure
SWING F7.2 Detector swing angle in degrees
ROT F7.2 Scan axis (omega or phi) setting in
degrees at which the reflection was
observed
IAXIS I2 Scan axis number (2=omega, 3=phi)
ISTL I5 SIN(THETA)/LAMBDA times 10,000
PKSUM I9 Total raw peak counts in photons, with no
correction for background, normalization,
or Lp
BGAVG I7 Normally, average BG per pixel in
photons * 1000. In data sets where the
background was reported to be very large
( > 1024 photons after 1 min/deg
normalization), the program issues a warning
message during integration and the scale of
1000 is omitted.
ROTREL F7.2 Rotation of the scan axis (omega or phi)
in degrees relative to the start of the
run
ICRYST I4 Crystal number (for scaling)
PKFRAC F6.3 Fraction of the profile volume nearest
this HKL. 1-PKFRAC is the fraction of
the intensity which had to be estimated
from the model profiles because of
overlap with neighboring spots
IKEY I11 The unique sort key for the group of
equivalents to which this HKL belongs.
IKEY = 1048576 * (H+511) + 1024 *
(K+511) + (L+511), where H,K and L are
the HKL indices transformed to the base
asymmetric unit.
IMULT I3 The point-group multiplicity of this HKL
CORLORENTZ F6.3 Lorentz correction
XGEO F8.2 Spatially corrected X relative to beam
center, in pixels
YGEO F8.2 Spatially corrected Y relative to beam
center, in pixels
CHI F8.3 Chi setting angle, degrees
OTHER_ANGLE F8.3 The remaining setting angle, degrees. This
will be phi if scans were in omega, or omega
if scans were in phi
ICOMPONENT I4 Twin component number in SHELXTL HKLF 5
convention. In a multi-component overlap,
ICOMPONENT is negated for all but the last
record of the overlap
"""
class saintraw(object):
doc = docs
titles = []
formats = {}
helps = {}
def __init__(self, filename=None):
"""
filename = filename to read in
"""
self.parsedocs()
if filename is not None:
self.read(filename)
def parsedocs(self):
"""
Parse the saint documentation for the Bruker format
"""
self.titles = []
title = help = format = None
for line in self.doc.split("\n"):
if len(line.rstrip()) == 0:
if title is not None:
self.formats[title] = format
self.helps[title] = help
self.titles.append(title)
title = None
format = None
continue
if line[0] != " ":
title, format = line.split()[0:2]
help = " ".join(line.split()[2:])
else:
help = " ".join([help, line.lstrip()])
alltitles = []
slices = []
funcs = []
i = 0
allformats = []
for t in self.titles:
f = self.formats[t]
if f[0].isdigit():
n = int(f[0])
f = f[1:]
else:
n = 1
if n > 1:
for j in range(n):
alltitles.append( t + "_%d" % (j) )
allformats.append( f )
else:
alltitles.append( t )
allformats.append( f )
assert f[0] in ["I","F"]
if f[0] == "I":
for dummy in range(n):
funcs.append( int )
if f[0] == "F":
for dummy in range(n):
funcs.append( float )
num = int(f[1:].split(".")[0])
for dummy in range(n):
slices.append( slice( i, i + num ) )
i += num
self.alltitles = alltitles
self.allformats = allformats
self.funcs = funcs
self.slices = slices
assert len(funcs) == len(slices)
assert len(slices) == len(alltitles)
def read(self, filename):
"""
Read an ascii formatted saint reflection file
"""
self.data = {}
self.lines = open(filename,"r").readlines()
for t in self.alltitles:
self.data[t] = []
zipped = list(zip(self.alltitles, self.slices, self.funcs))
for line in self.lines:
if line[0] == "!":
# Comment line
continue
for t,s,f in zipped:
# Parse this line
try:
self.data[t].append( f( line[s] ) )
except:
print(t,s,f)
raise
def condition_filter(self, name, func):
"""
Remove the peaks according to condition
"""
assert len(self.lines) == len(self.data[name] )
indices = numpy.compress( func( numpy.array( self.data[name]) ) ,
list(range(len(self.lines))) )
self.take( indices )
def take(self, order):
"""
Put the peaks in the order given in order (indices)
"""
for t in list(self.data.keys()):
self.data[t] = numpy.take( self.data[t],
order)
self.lines = list( numpy.take( self.lines,
order))
def sort(self, name):
"""
Sort according to a column in self.data
"""
order = numpy.argsort( self.data[name] )
self.take(order)
def write(self, filename):
"""
Write an ascii formatted saint reflection file
"""
outf = open(filename, "w")
for line in self.lines:
outf.write( line )
# raise Exception("Not implemented writing yet!")
def tocolumnfile(self):
"""
Return a columnfile
"""
cof = columnfile.newcolumnfile( self.alltitles )
dlist = [ self.data[t] for t in self.alltitles ]
cof.bigarray = numpy.array( dlist, float )  # numpy.float alias was removed in modern NumPy
cof.nrows = len( self.data[ self.alltitles[0] ] )
cof.ncols = len( self.alltitles )
cof.set_attributes()
return cof
if __name__ == "__main__":
import sys, time
START = time.time()
sra = saintraw()
print("Making object", time.time() - START)
START = time.time()
sra.read(sys.argv[1])
print("Reading", time.time() - START)
print(len(sra.data['IHKL_0']))
START = time.time()
cra = sra.tocolumnfile()
print(cra.bigarray.shape)
print("Convert to colfile", time.time() - START)
|
jonwright/ImageD11
|
ImageD11/saintraw.py
|
Python
|
gpl-2.0
| 11,924
|
[
"CRYSTAL"
] |
8122cfe8e8b3116126a33de7ace9c0977807476cd7cbe62d6ce3b3559f3e2f6e
|
from builtins import range
import numpy as np
def affine_forward(x, w, b):
"""
Computes the forward pass for an affine (fully-connected) layer.
The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N
examples, where each example x[i] has shape (d_1, ..., d_k). We will
reshape each input into a vector of dimension D = d_1 * ... * d_k, and
then transform it to an output vector of dimension M.
Inputs:
- x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
- w: A numpy array of weights, of shape (D, M)
- b: A numpy array of biases, of shape (M,)
Returns a tuple of:
- out: output, of shape (N, M)
- cache: (x, w, b)
"""
out = None
N = x.shape[0]
###########################################################################
# TODO: Implement the affine forward pass. Store the result in out. You #
# will need to reshape the input into rows. #
###########################################################################
reshape_x = np.reshape(x, [N, -1])
out = np.dot(reshape_x, w) + b
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = (x, w, b)
return out, cache
def affine_backward(dout, cache):
"""
Computes the backward pass for an affine layer.
Inputs:
- dout: Upstream derivative, of shape (N, M)
- cache: Tuple of:
- x: Input data, of shape (N, d_1, ... d_k)
- w: Weights, of shape (D, M)
Returns a tuple of:
- dx: Gradient with respect to x, of shape (N, d1, ..., d_k)
- dw: Gradient with respect to w, of shape (D, M)
- db: Gradient with respect to b, of shape (M,)
"""
x, w, b = cache
dx, dw, db = None, None, None
###########################################################################
# TODO: Implement the affine backward pass. #
###########################################################################
dx = np.dot(dout, w.T).reshape(x.shape)
dw = np.dot(x.reshape(x.shape[0], -1).T, dout)
db = np.sum(dout, axis=0)  # shape (M,), matching the docstring
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dw, db
def affine_forward_alt(x, w, b):
"""
Computes the forward pass for an affine (fully-connected) layer.
The input x has shape (N, D) and then transform it to an output vector of dimension M.
Inputs:
- x: A numpy array containing input data, of shape (N, D)
- w: A numpy array of weights, of shape (D, M)
- b: A numpy array of biases, of shape (M,)
Returns a tuple of:
- out: output, of shape (N, M)
- cache: (x, w, b)
"""
out = np.dot(x, w) + b
cache = (x, w, b)
return out, cache
def affine_backward_alt(dout, cache):
"""
Computes the backward pass for an affine layer.
Inputs:
- dout: Upstream derivative, of shape (N, M)
- cache: Tuple of:
- x: Input data, of shape (N, D)
- w: Weights, of shape (D, M)
Returns a tuple of:
- dx: Gradient with respect to x, of shape (N, D)
- dw: Gradient with respect to w, of shape (D, M)
- db: Gradient with respect to b, of shape (M,)
"""
x, w, b = cache
dx = np.dot(dout, w.T)
dw = np.dot(x.T, dout)
db = np.sum(dout, axis=0)  # shape (M,), matching the docstring
return dx, dw, db
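# Editor's note: illustrative sketch, not part of the original file. A tiny
# centred-difference check of affine_forward/affine_backward using only numpy;
# call it manually if desired.
def _demo_affine_gradcheck():
    def num_grad(f, x, df, h=1e-5):
        # numerical gradient of sum(f(x) * df) with respect to x
        grad = np.zeros_like(x)
        it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
        while not it.finished:
            ix = it.multi_index
            old = x[ix]
            x[ix] = old + h
            pos = f(x)
            x[ix] = old - h
            neg = f(x)
            x[ix] = old
            grad[ix] = np.sum((pos - neg) * df) / (2 * h)
            it.iternext()
        return grad

    x = np.random.randn(4, 3, 2)   # N=4, d_1=3, d_2=2 -> D=6
    w = np.random.randn(6, 5)
    b = np.random.randn(5)
    dout = np.random.randn(4, 5)
    _, cache = affine_forward(x, w, b)
    dx, dw, db = affine_backward(dout, cache)
    print('dx error:', np.max(np.abs(dx - num_grad(lambda v: affine_forward(v, w, b)[0], x, dout))))
    print('dw error:', np.max(np.abs(dw - num_grad(lambda v: affine_forward(x, v, b)[0], w, dout))))
    print('db error:', np.max(np.abs(db - num_grad(lambda v: affine_forward(x, w, v)[0], b, dout))))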
def relu_forward(x):
"""
Computes the forward pass for a layer of rectified linear units (ReLUs).
Input:
- x: Inputs, of any shape
Returns a tuple of:
- out: Output, of the same shape as x
- cache: x
"""
out = None
###########################################################################
# TODO: Implement the ReLU forward pass. #
###########################################################################
out = np.maximum(x, 0)
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = x
return out, cache
def relu_backward(dout, cache):
"""
Computes the backward pass for a layer of rectified linear units (ReLUs).
Input:
- dout: Upstream derivatives, of any shape
- cache: Input x, of same shape as dout
Returns:
- dx: Gradient with respect to x
"""
dx, x = None, cache
###########################################################################
# TODO: Implement the ReLU backward pass. #
###########################################################################
dx = dout
dx[x <= 0] = 0
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx
def batchnorm_forward(x, gamma, beta, bn_param):
"""
Forward pass for batch normalization.
During training the sample mean and (uncorrected) sample variance are
computed from minibatch statistics and used to normalize the incoming data.
During training we also keep an exponentially decaying running mean of the
mean and variance of each feature, and these averages are used to normalize
data at test-time.
At each timestep we update the running averages for mean and variance using
an exponential decay based on the momentum parameter:
running_mean = momentum * running_mean + (1 - momentum) * sample_mean
running_var = momentum * running_var + (1 - momentum) * sample_var
Note that the batch normalization paper suggests a different test-time
behavior: they compute sample mean and variance for each feature using a
large number of training images rather than using a running average. For
this implementation we have chosen to use running averages instead since
they do not require an additional estimation step; the torch7
implementation of batch normalization also uses running averages.
Input:
- x: Data of shape (N, D)
- gamma: Scale parameter of shape (D,)
- beta: Shift paremeter of shape (D,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance.
- running_mean: Array of shape (D,) giving running mean of features
- running_var Array of shape (D,) giving running variance of features
Returns a tuple of:
- out: of shape (N, D)
- cache: A tuple of values needed in the backward pass
"""
mode = bn_param['mode']
eps = bn_param.get('eps', 1e-5)
momentum = bn_param.get('momentum', 0.9)
N, D = x.shape
running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))
out, cache = None, None
if mode == 'train':
#######################################################################
# TODO: Implement the training-time forward pass for batch norm. #
# Use minibatch statistics to compute the mean and variance, use #
# these statistics to normalize the incoming data, and scale and #
# shift the normalized data using gamma and beta. #
# #
# You should store the output in the variable out. Any intermediates #
# that you need for the backward pass should be stored in the cache #
# variable. #
# #
# You should also use your computed sample mean and variance together #
# with the momentum variable to update the running mean and running #
# variance, storing your result in the running_mean and running_var #
# variables. #
#######################################################################
sample_mean = np.mean(x, axis=0)
sample_var = np.var(x, axis=0)
sample_std = np.sqrt(sample_var + eps)
sample_x = (x - sample_mean) / sample_std
running_mean = momentum * running_mean + (1 - momentum) * sample_mean
running_var = momentum * running_var + (1 - momentum) * sample_var
out = sample_x * gamma + beta
cache = x, sample_x, eps, gamma, sample_var, sample_mean
#######################################################################
# END OF YOUR CODE #
#######################################################################
elif mode == 'test':
#######################################################################
# TODO: Implement the test-time forward pass for batch normalization. #
# Use the running mean and variance to normalize the incoming data, #
# then scale and shift the normalized data using gamma and beta. #
# Store the result in the out variable. #
#######################################################################
running_std = np.sqrt(running_var + eps)
test_x = (x - running_mean) / running_std
out = test_x * gamma + beta
#######################################################################
# END OF YOUR CODE #
#######################################################################
else:
raise ValueError('Invalid forward batchnorm mode "%s"' % mode)
# Store the updated running means back into bn_param
bn_param['running_mean'] = running_mean
bn_param['running_var'] = running_var
return out, cache
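# Editor's note: illustrative sketch, not part of the original file. It shows
# how the same bn_param dict is threaded through training calls (which update
# the running statistics) and a test-time call (which consumes them). Shapes
# and values are arbitrary.
def _demo_batchnorm_usage():
    N, D = 8, 5
    gamma, beta = np.ones(D), np.zeros(D)
    bn_param = {'mode': 'train'}   # running_mean / running_var are created on first call
    for _step in range(20):        # a few "training steps" to populate the running stats
        _out, _cache = batchnorm_forward(np.random.randn(N, D), gamma, beta, bn_param)
    bn_param['mode'] = 'test'      # same dict, now normalizing with the running stats
    out_test, _ = batchnorm_forward(np.random.randn(N, D), gamma, beta, bn_param)
    return out_test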
def batchnorm_backward(dout, cache):
"""
Backward pass for batch normalization.
For this implementation, you should write out a computation graph for
batch normalization on paper and propagate gradients backward through
intermediate nodes.
Inputs:
- dout: Upstream derivatives, of shape (N, D)
- cache: Variable of intermediates from batchnorm_forward.
Returns a tuple of:
- dx: Gradient with respect to inputs x, of shape (N, D)
- dgamma: Gradient with respect to scale parameter gamma, of shape (D,)
- dbeta: Gradient with respect to shift parameter beta, of shape (D,)
"""
dx, dgamma, dbeta = None, None, None
###########################################################################
# TODO: Implement the backward pass for batch normalization. Store the #
# results in the dx, dgamma, and dbeta variables. #
###########################################################################
x, x_hat, eps, gamma, var, mean = cache
N = x.shape[0]
dx_hat = dout * gamma
dvar = -0.5 * np.sum(dx_hat * (x - mean), axis=0) * np.power(eps + var, -1.5)
dx_hat_cache = dx_hat * (np.power(eps + var, -0.5))
dmean = -1 * np.sum(dx_hat_cache, axis=0) - 2 * dvar * np.mean((x - mean), axis=0)
dx = dx_hat_cache + dvar * 2 * (x - mean) / N + dmean / N
dgamma = np.sum(dout * x_hat, axis=0)
dbeta = np.sum(dout, axis=0)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dgamma, dbeta
def batchnorm_backward_alt(dout, cache):
"""
Alternative backward pass for batch normalization.
For this implementation you should work out the derivatives for the batch
normalizaton backward pass on paper and simplify as much as possible. You
should be able to derive a simple expression for the backward pass.
Note: This implementation should expect to receive the same cache variable
as batchnorm_backward, but might not use all of the values in the cache.
Inputs / outputs: Same as batchnorm_backward
"""
dx, dgamma, dbeta = None, None, None
###########################################################################
# TODO: Implement the backward pass for batch normalization. Store the #
# results in the dx, dgamma, and dbeta variables. #
# #
# After computing the gradient with respect to the centered inputs, you #
# should be able to compute gradients with respect to the inputs in a #
# single statement; our implementation fits on a single 80-character line.#
###########################################################################
x, x_hat, eps, gamma, var, mean = cache
N = x.shape[0]
dx_hat = dout * gamma
dvar = -0.5 * np.sum(dx_hat * (x - mean), axis=0) * np.power(eps + var, -1.5)
dx_hat_cache = dx_hat * (np.power(eps + var, -0.5))
dmean = -1 * np.sum(dx_hat_cache, axis=0) - 2 * dvar * np.mean((x - mean), axis=0)
dx = dx_hat_cache + dvar * 2 * (x - mean) / N + dmean / N
dgamma = np.sum(dout * x_hat, axis=0)
dbeta = np.sum(dout, axis=0)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dgamma, dbeta
def dropout_forward(x, dropout_param):
"""
Performs the forward pass for (inverted) dropout.
Inputs:
- x: Input data, of any shape
- dropout_param: A dictionary with the following keys:
- p: Dropout parameter. We keep each neuron output with probability p.
- mode: 'test' or 'train'. If the mode is train, then perform dropout;
if the mode is test, then just return the input.
- seed: Seed for the random number generator. Passing seed makes this
function deterministic, which is needed for gradient checking but not
in real networks.
Outputs:
- out: Array of the same shape as x.
- cache: tuple (dropout_param, mask). In training mode, mask is the dropout
mask that was used to multiply the input; in test mode, mask is None.
"""
p, mode = dropout_param['p'], dropout_param['mode']
if 'seed' in dropout_param:
np.random.seed(dropout_param['seed'])
mask = None
out = None
if mode == 'train':
#######################################################################
# TODO: Implement training phase forward pass for inverted dropout. #
# Store the dropout mask in the mask variable. #
#######################################################################
mask = (np.random.rand(*x.shape) < p) / p
out = x * mask
#######################################################################
# END OF YOUR CODE #
#######################################################################
elif mode == 'test':
#######################################################################
# TODO: Implement the test phase forward pass for inverted dropout. #
#######################################################################
out = x
#######################################################################
# END OF YOUR CODE #
#######################################################################
cache = (dropout_param, mask)
out = out.astype(x.dtype, copy=False)
return out, cache
def dropout_backward(dout, cache):
"""
Perform the backward pass for (inverted) dropout.
Inputs:
- dout: Upstream derivatives, of any shape
- cache: (dropout_param, mask) from dropout_forward.
"""
dropout_param, mask = cache
mode = dropout_param['mode']
dx = None
if mode == 'train':
#######################################################################
# TODO: Implement training phase backward pass for inverted dropout #
#######################################################################
dx = dout * mask
#######################################################################
# END OF YOUR CODE #
#######################################################################
elif mode == 'test':
dx = dout
return dx
def conv_forward_naive(x, w, b, conv_param):
"""
A naive implementation of the forward pass for a convolutional layer.
The input consists of N data points, each with C channels, height H and
width W. We convolve each input with F different filters, where each filter
spans all C channels and has height HH and width WW.
Input:
- x: Input data of shape (N, C, H, W)
- w: Filter weights of shape (F, C, HH, WW)
- b: Biases, of shape (F,)
- conv_param: A dictionary with the following keys:
- 'stride': The number of pixels between adjacent receptive fields in the
horizontal and vertical directions.
- 'pad': The number of pixels that will be used to zero-pad the input.
Returns a tuple of:
- out: Output data, of shape (N, F, H', W') where H' and W' are given by
H' = 1 + (H + 2 * pad - HH) / stride
W' = 1 + (W + 2 * pad - WW) / stride
- cache: (x, w, b, conv_param)
"""
out = None
###########################################################################
# TODO: Implement the convolutional forward pass. #
# Hint: you can use the function np.pad for padding. #
###########################################################################
pad = conv_param['pad']
stride = conv_param['stride']
N, _, H, W = x.shape
F, _, HH, WW = w.shape
H_dot = 1 + (H + 2 * pad - HH) // stride
W_dot = 1 + (W + 2 * pad - WW) // stride
out = np.zeros((N, F, H_dot, W_dot))
new_x = np.pad(x[:, :, :, :], ((0, 0), (0, 0), (pad, pad), (pad, pad)), 'constant')
for n in range(N):
for f in range(F):
for h_dot in range(0,H + 2 * pad - HH + 1,stride):
for w_dot in range(0,W + 2 * pad - WW + 1,stride):
out[n, f, h_dot // stride, w_dot // stride] = np.sum(new_x[n, :, h_dot:h_dot + HH, w_dot:w_dot + WW] * w[f]) + b[f]
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = (x, new_x, w, b, conv_param)
return out, cache
def conv_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a convolutional layer.
Inputs:
- dout: Upstream derivatives.
- cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive
Returns a tuple of:
- dx: Gradient with respect to x
- dw: Gradient with respect to w
- db: Gradient with respect to b
"""
dx, dw, db = None, None, None
###########################################################################
# TODO: Implement the convolutional backward pass. #
###########################################################################
x, x_pad, w, b, conv_param = cache
pad = conv_param['pad']
stride = conv_param['stride']
N, F, H1, W1 = dout.shape
N, C, H, W = x.shape
HH = w.shape[2]
WW = w.shape[3]
dx = np.zeros((N, C, H, W))
dx_pad = np.zeros(x_pad.shape)
dw = np.zeros(w.shape)
db = np.zeros(b.shape)
for n in range(N):
for f in range(F):
for i in range(H1):
for j in range(W1):
db[f] += dout[n, f, i, j]
dw[f] += dout[n, f, i, j] * x_pad[n, :, i * stride:i * stride + HH, j * stride:j * stride + WW]
dx_pad[n, :, i * stride:i * stride + HH, j * stride:j * stride + WW] += dout[n, f, i, j] * w[f]
dx = dx_pad[:, :, pad:pad + H, pad:pad + W].copy()
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dw, db
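# Editor's note: illustrative sketch, not part of the original file. It
# exercises the naive convolution on a tiny batch to confirm the output-size
# formula (stride 1, pad 1, 3x3 filters keep the 8x8 spatial size). Values
# are arbitrary.
def _demo_conv_naive():
    x = np.random.randn(2, 3, 8, 8)
    w = np.random.randn(4, 3, 3, 3)
    b = np.random.randn(4)
    conv_param = {'stride': 1, 'pad': 1}
    out, cache = conv_forward_naive(x, w, b, conv_param)   # out.shape == (2, 4, 8, 8)
    dx, dw, db = conv_backward_naive(np.random.randn(*out.shape), cache)
    return out.shape, dx.shape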
def max_pool_forward_naive(x, pool_param):
"""
A naive implementation of the forward pass for a max pooling layer.
Inputs:
- x: Input data, of shape (N, C, H, W)
- pool_param: dictionary with the following keys:
- 'pool_height': The height of each pooling region
- 'pool_width': The width of each pooling region
- 'stride': The distance between adjacent pooling regions
Returns a tuple of:
- out: Output data
- cache: (x, pool_param)
"""
out = None
###########################################################################
# TODO: Implement the max pooling forward pass #
###########################################################################
HH, WW, stride = pool_param['pool_height'], pool_param['pool_width'], pool_param['stride']
N, C, H, W = x.shape
out_h = 1 + (H - HH) // stride
out_w = 1 + (W - WW) // stride
out = np.zeros((N, C, out_h, out_w))
for n in range(N):
for h in range(out_h):
for w in range(out_w):
out[n,:,h,w] = np.max(x[n,:,h*stride:h*stride+HH,w*stride:w*stride+WW], axis=(1,2))
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = (x, pool_param)
return out, cache
def max_pool_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a max pooling layer.
Inputs:
- dout: Upstream derivatives
- cache: A tuple of (x, pool_param) as in the forward pass.
Returns:
- dx: Gradient with respect to x
"""
###########################################################################
# TODO: Implement the max pooling backward pass #
###########################################################################
x, pool_param = cache
N, C, H, W = x.shape
HH = pool_param['pool_height']
WW = pool_param['pool_width']
stride = pool_param['stride']
    H1 = 1 + (H - HH) // stride
    W1 = 1 + (W - WW) // stride
dx = np.zeros(x.shape)
for n in range(N):
for c in range(C):
for i in range(H1):
for j in range(W1):
                    window = x[n, c, i * stride:i * stride + HH, j * stride:j * stride + WW]
                    # Route the gradient to the arg-max position inside this window only.
                    r, s = np.unravel_index(np.argmax(window), window.shape)
                    dx[n, c, i * stride + r, j * stride + s] += dout[n, c, i, j]
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx
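# ---------------------------------------------------------------------------
# Illustrative check (not part of the original assignment): each upstream
# gradient element is routed to exactly one input position, so the gradient
# totals of dout and dx agree.  Assumes numpy is imported as np at the top of
# this file.
def _max_pool_grad_routing_check():
    np.random.seed(1)
    x = np.random.randn(2, 3, 4, 4)
    pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
    out, cache = max_pool_forward_naive(x, pool_param)
    dout = np.random.randn(*out.shape)
    dx = max_pool_backward_naive(dout, cache)
    assert np.isclose(dx.sum(), dout.sum())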
def spatial_batchnorm_forward(x, gamma, beta, bn_param):
"""
Computes the forward pass for spatial batch normalization.
Inputs:
- x: Input data of shape (N, C, H, W)
- gamma: Scale parameter, of shape (C,)
- beta: Shift parameter, of shape (C,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance. momentum=0 means that
old information is discarded completely at every time step, while
momentum=1 means that new information is never incorporated. The
default of momentum=0.9 should work well in most situations.
  - running_mean: Array of shape (C,) giving running mean of features
  - running_var: Array of shape (C,) giving running variance of features
Returns a tuple of:
- out: Output data, of shape (N, C, H, W)
- cache: Values needed for the backward pass
"""
out, cache = None, None
###########################################################################
# TODO: Implement the forward pass for spatial batch normalization. #
# #
# HINT: You can implement spatial batch normalization using the vanilla #
# version of batch normalization defined above. Your implementation should#
# be very short; ours is less than five lines. #
###########################################################################
N, C, H, W = x.shape
x_new = x.transpose(0, 2, 3, 1).reshape(N * H * W, C)
out, cache = batchnorm_forward(x_new, gamma, beta, bn_param)
out = out.reshape(N, H, W, C).transpose(0, 3, 1, 2)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return out, cache
def spatial_batchnorm_backward(dout, cache):
"""
Computes the backward pass for spatial batch normalization.
Inputs:
- dout: Upstream derivatives, of shape (N, C, H, W)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient with respect to inputs, of shape (N, C, H, W)
- dgamma: Gradient with respect to scale parameter, of shape (C,)
- dbeta: Gradient with respect to shift parameter, of shape (C,)
"""
dx, dgamma, dbeta = None, None, None
###########################################################################
# TODO: Implement the backward pass for spatial batch normalization. #
# #
# HINT: You can implement spatial batch normalization using the vanilla #
# version of batch normalization defined above. Your implementation should#
# be very short; ours is less than five lines. #
###########################################################################
N, C, H, W = dout.shape
dout_new = dout.transpose(0, 2, 3, 1).reshape(N * H * W, C)
dx, dgamma, dbeta = batchnorm_backward(dout_new, cache)
dx = dx.reshape(N, H, W, C).transpose(0, 3, 1, 2)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dgamma, dbeta
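# ---------------------------------------------------------------------------
# Illustrative check (not part of the original assignment): the reshape trick
# used in the two functions above is a lossless permutation, so every spatial
# position is handed to the vanilla batchnorm as an independent sample of C
# features.  Assumes numpy is imported as np at the top of this file.
def _spatial_bn_reshape_roundtrip_check():
    x = np.random.randn(2, 3, 4, 5)                          # (N, C, H, W)
    flat = x.transpose(0, 2, 3, 1).reshape(-1, x.shape[1])   # (N*H*W, C)
    back = flat.reshape(2, 4, 5, 3).transpose(0, 3, 1, 2)    # back to (N, C, H, W)
    assert np.allclose(x, back)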
def svm_loss(x, y):
"""
  Computes the loss and gradient for multiclass SVM classification.
Inputs:
- x: Input data, of shape (N, C) where x[i, j] is the score for the jth
class for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
N = x.shape[0]
correct_class_scores = x[np.arange(N), y]
margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
margins[np.arange(N), y] = 0
loss = np.sum(margins) / N
num_pos = np.sum(margins > 0, axis=1)
dx = np.zeros_like(x)
dx[margins > 0] = 1
dx[np.arange(N), y] -= num_pos
dx /= N
return loss, dx
def softmax_loss(x, y):
"""
Computes the loss and gradient for softmax classification.
Inputs:
- x: Input data, of shape (N, C) where x[i, j] is the score for the jth
class for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
shifted_logits = x - np.max(x, axis=1, keepdims=True)
Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)
log_probs = shifted_logits - np.log(Z)
probs = np.exp(log_probs)
N = x.shape[0]
loss = -np.sum(log_probs[np.arange(N), y]) / N
dx = probs.copy()
dx[np.arange(N), y] -= 1
dx /= N
return loss, dx
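# ---------------------------------------------------------------------------
# Illustrative sanity check (not part of the original assignment): compare the
# analytic softmax_loss gradient against a central-difference estimate.  It
# assumes numpy is imported as np at the top of this file, as elsewhere.
def _softmax_loss_grad_check(num_checks=10, h=1e-5):
    np.random.seed(0)
    x = np.random.randn(8, 5)
    y = np.random.randint(5, size=8)
    _, dx = softmax_loss(x, y)
    for _ in range(num_checks):
        idx = tuple(np.random.randint(d) for d in x.shape)
        old = x[idx]
        x[idx] = old + h
        loss_plus, _ = softmax_loss(x, y)
        x[idx] = old - h
        loss_minus, _ = softmax_loss(x, y)
        x[idx] = old
        numeric = (loss_plus - loss_minus) / (2 * h)
        rel_err = abs(numeric - dx[idx]) / max(1e-8, abs(numeric) + abs(dx[idx]))
        assert rel_err < 1e-4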
|
YeEmrick/learning
|
cs231/assignment/assignment2/cs231n/layers.py
|
Python
|
apache-2.0
| 29,281
|
[
"NEURON"
] |
ec67cb9e62caa0d2e847f34922024c0aa291956af5afa2721af653dbec4fab20
|
"""
This module implements modified nodal analysis (MNA).
Copyright 2014--2022 Michael Hayes, UCECE
"""
from __future__ import division
from .assumptions import Assumptions
from .vector import Vector
from .matrix import Matrix, matrix_inverse
from .sym import symsimplify
from .expr import ExprDict, expr
from .voltage import Vtype
from .current import Itype
from .systemequations import SystemEquations
import sympy as sym
# Note, all the maths is performed using sympy expressions and the
# values are converted to Expr when required.  This is more
# efficient and, more importantly, overcomes some of the wrapping
# problems which cause the is_real attribute to be dropped.
class Nodedict(ExprDict):
def __getitem__(self, name):
"""Return node by name or number."""
# If name is an integer, convert to a string.
if isinstance(name, int):
name = '%d' % name
return super(Nodedict, self).__getitem__(name)
class Branchdict(ExprDict):
pass
class MNA(object):
"""This class performs modified nodal analysis (MNA) on a netlist of
components. There are several variants:
1. DC analysis if all the independent sources are DC. The .V and .I
methods return DC expressions with the dc assumption set.
2. AC analysis if all the independent sources are AC. The .V and .I
methods return phasors.
3. Initial value Laplace analysis if an L or C has an explicit
initial value. The .V and .I methods return s-domain expressions
with no assumption sets; thus the time-domain results are only
valid for t >= 0.
4. General Laplace analysis. If all the sources are causal and
all the initial conditions are zero (explicitly or implicitly)
then the time-domain results are causal.
5. Noise analysis.
Note, it is assumed that the user of this class uses superposition
to solve problems with mixed independent sources, such as DC and
AC.
"""
def __init__(self, cct):
self.cct = cct
self.kind = cct.kind
if cct.elements == {}:
raise ValueError('No elements to analyse')
# TODO: think this out. When a circuit is converted
# to a s-domain model we get Z (and perhaps Y) components.
# We also lose the ability to determine the voltage
# across a capacitor or inductor since they get split
# into a Thevenin model and renamed.
if hasattr(self, '_s_model'):
raise RuntimeError('Cannot analyse s-domain model')
# Determine which branch currents are needed.
self.unknown_branch_currents = []
for elt in self.cct.elements.values():
if elt.need_branch_current:
self.unknown_branch_currents.append(elt.name)
if elt.need_extra_branch_current:
self.unknown_branch_currents.append(elt.name + 'X')
# Generate stamps.
num_nodes = len(self.cct.node_list) - 1
num_branches = len(self.unknown_branch_currents)
self._G = sym.zeros(num_nodes, num_nodes)
self._B = sym.zeros(num_nodes, num_branches)
self._C = sym.zeros(num_branches, num_nodes)
self._D = sym.zeros(num_branches, num_branches)
self._Is = sym.zeros(num_nodes, 1)
self._Es = sym.zeros(num_branches, 1)
# Iterate over circuit elements and fill in matrices.
for elt in self.cct.elements.values():
if not elt.nosim:
elt._stamp(self)
# Augment the admittance matrix to form A matrix.
self._A = self._G.row_join(self._B).col_join(self._C.row_join(self._D))
# Augment the known current vector with known voltage vector
# to form Z vector.
self._Z = self._Is.col_join(self._Es)
def _invalidate(self):
for attr in ('_A', '_Vdict', '_Idict'):
if hasattr(self, attr):
delattr(self, attr)
def _cpt_node_indexes(self, cpt):
return [self._node_index(n) for n in cpt.nodenames]
def _cpt_branch_index(self, cpt):
return self._branch_index(cpt.name)
def _node_index(self, node):
"""Return node index; ground is -1"""
return self.cct.node_list.index(self.cct.node_map[node]) - 1
def _branch_index(self, cpt_name):
try:
index = self.unknown_branch_currents.index(cpt_name)
return index
except ValueError:
raise ValueError('Unknown component name %s for branch current' % cpt_name)
def _failure_reasons(self):
message = 'The MNA A matrix is not invertible for %s analysis:' % self.kind
cct = self.cct
if not cct.is_connected:
return message + ' Not all nodes are connected. Use cct.unconnected_nodes() to find them.'
reasons = []
if cct.kind == 'dc':
reasons.append('Check there is a DC path between all nodes.')
if cct.transformers != []:
reasons.append('Check secondary of transformer is referenced to ground.')
if len(cct.capacitors) > 1:
reasons.append('Check capacitors are not in series.')
if cct.voltage_sources != []:
reasons.append('Check voltage source is not short-circuited.')
if len(cct.voltage_sources) > 1:
reasons.append('Check for loop of voltage sources.')
if cct.current_sources != []:
reasons.append('Check current source is not open-circuited.')
if len(cct.current_sources) > 1:
reasons.append('Check for current sources in series.')
return message + '\n ' + '\n '.join(reasons)
def _solve(self):
"""Solve network."""
if hasattr(self, '_Vdict'):
return
if '0' not in self.cct.node_map:
raise RuntimeError('Cannot solve: nothing connected to ground node 0')
# Solve for the nodal voltages
try:
# The default method, Gaussian elimination, is the fastest
# but hangs on some matrices with sympy-1.6.1
# Comparative times for the testsuites are:
# GE 66, ADJ 73, LU 76.
Ainv = matrix_inverse(self._A)
except ValueError:
message = self._failure_reasons()
raise ValueError(message)
results = symsimplify(Ainv * self._Z)
results = results.subs(self.cct.context.symbols)
branchdict = {}
for elt in self.cct.elements.values():
if elt.type == 'K' or elt.ignore:
continue
n1, n2 = self.cct.node_map[elt.nodenames[0]], self.cct.node_map[elt.nodenames[1]]
branchdict[elt.name] = (n1, n2)
vtype = Vtype(self.kind)
itype = Itype(self.kind)
assumptions = Assumptions()
if vtype.is_phasor_domain:
assumptions.set('omega', self.kind)
elif self.kind in ('s', 'ivp'):
assumptions.set('ac', self.cct.is_ac)
assumptions.set('dc', self.cct.is_dc)
assumptions.set('causal', self.cct.is_causal)
elif isinstance(self.kind, str) and self.kind[0] == 'n':
assumptions.set('nid', self.kind)
# Create dictionary of node voltages
self._Vdict = Nodedict()
self._Vdict['0'] = vtype(0, **assumptions)
for n in self.cct.nodes:
index = self._node_index(n)
if index >= 0:
self._Vdict[n] = vtype(results[index], **assumptions).simplify()
else:
self._Vdict[n] = vtype(0, **assumptions)
num_nodes = len(self.cct.node_list) - 1
# Create dictionary of branch currents through elements
self._Idict = Branchdict()
for m, key in enumerate(self.unknown_branch_currents):
I = results[m + num_nodes]
if key in self.cct.elements and self.cct.elements[key].is_source:
I = -I
self._Idict[key] = itype(I, **assumptions).simplify()
# Calculate the branch currents. These should be lazily
# evaluated as required.
for elt in self.cct.elements.values():
if elt.type in ('R', 'NR', 'C'):
n1 = self.cct.node_map[elt.nodenames[0]]
n2 = self.cct.node_map[elt.nodenames[1]]
V1, V2 = self._Vdict[n1], self._Vdict[n2]
I = (V1.expr - V2.expr - elt.V0.expr) / elt.Z.expr
self._Idict[elt.name] = itype(I, **assumptions).simplify()
elif elt.type in ('I', ):
self._Idict[elt.name] = elt.Isc
@property
def A(self):
"""Return A matrix for MNA"""
return Matrix(self._A)
@property
def B(self):
"""Return B matrix for MNA"""
return Matrix(self._B)
@property
def C(self):
"""Return C matrix for MNA"""
return Matrix(self._C)
@property
def D(self):
"""Return D matrix for MNA"""
return Matrix(self._D)
@property
def G(self):
"""Return G matrix for MNA"""
return Matrix(self._G)
@property
def Z(self):
"""Return Z vector for MNA"""
return Vector(self._Z)
@property
def E(self):
"""Return E vector for MNA"""
return Vector(self._Es)
@property
def I(self):
"""Return I vector for MNA"""
return Vector(self._Is)
@property
def X(self):
"""Return X vector (of unknowns) for MNA"""
V = [self.cct.Vname('Vn%s' % node) for node in self.cct.node_list[1:]]
I = [self.cct.Iname('I%s' % branch) for branch in self.unknown_branch_currents]
return Vector(V + I)
@property
def Vdict(self):
"""Return dictionary of transform domain node voltages indexed by node
name"""
self._solve()
return self._Vdict
@property
def Idict(self):
"""Return dictionary of transform domain branch currents indexed by
component name"""
self._solve()
return self._Idict
def matrix_equations(self, form='default', invert=False):
"""System of equations used to find the unknowns.
Forms can be:
A y = b
b = A y
Ainv b = y
y = Ainv b
If `invert` is True, evaluate the matrix inverse."""
sys = SystemEquations(self._A, self._Z, self.X)
return sys.format(form, invert)
def equations(self, inverse=False):
"""System of equations used to find the unknowns.
If inverse is True, evaluate the matrix inverse.
This is for compatibility and is deprecated. Use
matrix_equations instead."""
return self.matrix_equations(invert=inverse)
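# ---------------------------------------------------------------------------
# Illustrative sketch (not part of lcapy's public API): the block structure
# assembled in MNA.__init__ above, written out by hand for a Vs--R1--R2
# voltage divider (Vs from node 1 to ground, R1 between nodes 1 and 2, R2
# from node 2 to ground).  Only sympy is assumed.
def _mna_divider_sketch():
    R1, R2, Vs = sym.symbols('R1 R2 Vs', positive=True)
    # Nodal conductance stamps for the two non-ground nodes.
    G = sym.Matrix([[1 / R1, -1 / R1],
                    [-1 / R1, 1 / R1 + 1 / R2]])
    # One unknown branch current: the current through the voltage source.
    B = sym.Matrix([[1], [0]])
    C = B.T
    D = sym.Matrix([[0]])
    A = G.row_join(B).col_join(C.row_join(D))
    Z = sym.Matrix([0, 0, Vs])    # known currents, then known voltages
    x = A.LUsolve(Z)              # [v1, v2, i_Vs]
    # The familiar divider result drops out of the solution.
    assert sym.simplify(x[1] - Vs * R2 / (R1 + R2)) == 0
    return x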
|
mph-/lcapy
|
lcapy/mna.py
|
Python
|
lgpl-2.1
| 10,744
|
[
"Gaussian"
] |
7bf58d4f604d2e0c8a9d9edcb4e15c2e9712f92a41708df25f7658abc919f0d7
|
#
# Copyright (C) 2003-2013 Greg Landrum and Rational Discovery LLC
# All Rights Reserved
#
""" This is a rough coverage test of the python wrapper
it's intended to be shallow, but broad
"""
from __future__ import print_function
import os,sys,tempfile,gzip
import unittest, doctest
from rdkit import RDConfig,rdBase
from rdkit import DataStructs
from rdkit import Chem
from rdkit import six
from rdkit.six import exec_
from rdkit import __version__
# Boost functions are NOT found by doctest, this "fixes" them
# by adding the doctests to a fake module
import imp
TestReplaceCore = imp.new_module("TestReplaceCore")
code = """
from rdkit.Chem import ReplaceCore
def ReplaceCore(*a, **kw):
'''%s
'''
return Chem.ReplaceCore(*a, **kw)
"""%"\n".join(
[x.lstrip() for x in Chem.ReplaceCore.__doc__.split("\n")])
exec_(code,TestReplaceCore.__dict__)
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(TestReplaceCore))
return tests
def feq(v1,v2,tol2=1e-4):
return abs(v1-v2)<=tol2
def getTotalFormalCharge(mol):
totalFormalCharge = 0
for atom in mol.GetAtoms():
totalFormalCharge += atom.GetFormalCharge()
return totalFormalCharge
def cmpFormalChargeBondOrder(self, mol1, mol2):
self.assertEqual(mol1.GetNumAtoms(), mol2.GetNumAtoms())
self.assertEqual(mol1.GetNumBonds(), mol2.GetNumBonds())
for i in range(mol1.GetNumAtoms()):
self.assertEqual(mol1.GetAtomWithIdx(i).GetFormalCharge(),
mol2.GetAtomWithIdx(i).GetFormalCharge())
for i in range(mol1.GetNumBonds()):
self.assertEqual(mol1.GetBondWithIdx(i).GetBondType(),
mol2.GetBondWithIdx(i).GetBondType())
def setResidueFormalCharge(mol, res, fc):
for query in res:
matches = mol.GetSubstructMatches(query)
for match in matches:
mol.GetAtomWithIdx(match[-1]).SetFormalCharge(fc)
def getBtList2(resMolSuppl):
btList2 = []
while (not resMolSuppl.atEnd()):
resMol = next(resMolSuppl)
bt = [];
for bond in resMol.GetBonds():
bt.append(int(bond.GetBondTypeAsDouble()))
btList2.append(bt)
for i in range(len(btList2)):
same = True
for j in range(len(btList2[i])):
if (not i):
continue
if (same):
same = (btList2[i][j] == btList2[i - 1][j])
if (i and same):
return None
return btList2
class TestCase(unittest.TestCase):
def setUp(self):
pass
def test0Except(self):
try:
Chem.tossit()
except IndexError:
ok=1
else:
ok=0
assert ok
def test1Table(self):
tbl = Chem.GetPeriodicTable()
self.assertTrue(tbl)
self.assertTrue(feq(tbl.GetAtomicWeight(6),12.011))
self.assertTrue(feq(tbl.GetAtomicWeight("C"),12.011))
self.assertTrue(tbl.GetAtomicNumber('C')==6)
self.assertTrue(feq(tbl.GetRvdw(6),1.950))
self.assertTrue(feq(tbl.GetRvdw("C"),1.950))
self.assertTrue(feq(tbl.GetRcovalent(6),0.680))
self.assertTrue(feq(tbl.GetRcovalent("C"),0.680))
self.assertTrue(tbl.GetDefaultValence(6)==4)
self.assertTrue(tbl.GetDefaultValence("C")==4)
self.assertTrue(tuple(tbl.GetValenceList(6))==(4,))
self.assertTrue(tuple(tbl.GetValenceList("C"))==(4,))
self.assertTrue(tuple(tbl.GetValenceList(16))==(2,4,6))
self.assertTrue(tuple(tbl.GetValenceList("S"))==(2,4,6))
self.assertTrue(tbl.GetNOuterElecs(6)==4)
self.assertTrue(tbl.GetNOuterElecs("C")==4)
def test2Atom(self):
atom = Chem.Atom(6)
self.assertTrue(atom)
self.assertTrue(atom.GetAtomicNum()==6)
atom.SetAtomicNum(8)
self.assertTrue(atom.GetAtomicNum()==8)
atom = Chem.Atom("C")
self.assertTrue(atom)
self.assertTrue(atom.GetAtomicNum()==6)
def test3Bond(self):
# No longer relevant, bonds are not constructible from Python
pass
def test4Mol(self):
mol = Chem.Mol()
self.assertTrue(mol)
def test5Smiles(self):
mol = Chem.MolFromSmiles('n1ccccc1')
self.assertTrue(mol)
self.assertTrue(mol.GetNumAtoms()==6)
self.assertTrue(mol.GetNumAtoms(1)==6)
self.assertTrue(mol.GetNumAtoms(0)==11)
at = mol.GetAtomWithIdx(2)
self.assertTrue(at.GetAtomicNum()==6)
at = mol.GetAtomWithIdx(0)
self.assertTrue(at.GetAtomicNum()==7)
def _test6Bookmarks(self):
mol = Chem.MolFromSmiles('n1ccccc1')
self.assertTrue(mol)
self.assertTrue(not mol.HasAtomBookmark(0))
mol.SetAtomBookmark(mol.GetAtomWithIdx(0),0)
mol.SetAtomBookmark(mol.GetAtomWithIdx(1),1)
self.assertTrue(mol.HasAtomBookmark(0))
self.assertTrue(mol.HasAtomBookmark(1))
if 1:
self.assertTrue(not mol.HasBondBookmark(0))
self.assertTrue(not mol.HasBondBookmark(1))
mol.SetBondBookmark(mol.GetBondWithIdx(0),0)
mol.SetBondBookmark(mol.GetBondWithIdx(1),1)
self.assertTrue(mol.HasBondBookmark(0))
self.assertTrue(mol.HasBondBookmark(1))
at = mol.GetAtomWithBookmark(0)
self.assertTrue(at)
self.assertTrue(at.GetAtomicNum()==7)
mol.ClearAtomBookmark(0)
self.assertTrue(not mol.HasAtomBookmark(0))
self.assertTrue(mol.HasAtomBookmark(1))
mol.ClearAllAtomBookmarks()
self.assertTrue(not mol.HasAtomBookmark(0))
self.assertTrue(not mol.HasAtomBookmark(1))
mol.SetAtomBookmark(mol.GetAtomWithIdx(1),1)
if 1:
self.assertTrue(mol.HasBondBookmark(0))
self.assertTrue(mol.HasBondBookmark(1))
bond = mol.GetBondWithBookmark(0)
self.assertTrue(bond)
mol.ClearBondBookmark(0)
self.assertTrue(not mol.HasBondBookmark(0))
self.assertTrue(mol.HasBondBookmark(1))
mol.ClearAllBondBookmarks()
self.assertTrue(not mol.HasBondBookmark(0))
self.assertTrue(not mol.HasBondBookmark(1))
self.assertTrue(mol.HasAtomBookmark(1))
def test7Atom(self):
mol = Chem.MolFromSmiles('n1ccccc1C[CH2-]')
self.assertTrue(mol)
Chem.SanitizeMol(mol)
a0 = mol.GetAtomWithIdx(0)
a1 = mol.GetAtomWithIdx(1)
a6 = mol.GetAtomWithIdx(6)
a7 = mol.GetAtomWithIdx(7)
self.assertTrue(a0.GetAtomicNum()==7)
self.assertTrue(a0.GetSymbol()=='N')
self.assertTrue(a0.GetIdx()==0)
aList = [a0,a1,a6,a7]
self.assertTrue(a0.GetDegree()==2)
self.assertTrue(a1.GetDegree()==2)
self.assertTrue(a6.GetDegree()==2)
self.assertTrue(a7.GetDegree()==1)
self.assertTrue([x.GetDegree() for x in aList]==[2,2,2,1])
self.assertTrue([x.GetTotalNumHs() for x in aList]==[0,1,2,2])
self.assertTrue([x.GetNumImplicitHs() for x in aList]==[0,1,2,0])
self.assertTrue([x.GetExplicitValence() for x in aList]==[3,3,2,3])
self.assertTrue([x.GetImplicitValence() for x in aList]==[0,1,2,0])
self.assertTrue([x.GetFormalCharge() for x in aList]==[0,0,0,-1])
self.assertTrue([x.GetNoImplicit() for x in aList]==[0,0,0,1])
self.assertTrue([x.GetNumExplicitHs() for x in aList]==[0,0,0,2])
self.assertTrue([x.GetIsAromatic() for x in aList]==[1,1,0,0])
self.assertTrue([x.GetHybridization() for x in aList]==[Chem.HybridizationType.SP2,Chem.HybridizationType.SP2,
Chem.HybridizationType.SP3,Chem.HybridizationType.SP3],\
[x.GetHybridization() for x in aList])
def test8Bond(self):
mol = Chem.MolFromSmiles('n1ccccc1CC(=O)O')
self.assertTrue(mol)
Chem.SanitizeMol(mol)
# note bond numbering is funny because of ring closure
b0 = mol.GetBondWithIdx(0)
b6 = mol.GetBondWithIdx(6)
b7 = mol.GetBondWithIdx(7)
b8 = mol.GetBondWithIdx(8)
bList = [b0,b6,b7,b8]
self.assertTrue([x.GetBondType() for x in bList] ==
[Chem.BondType.AROMATIC,Chem.BondType.SINGLE,
Chem.BondType.DOUBLE,Chem.BondType.SINGLE])
self.assertTrue([x.GetIsAromatic() for x in bList] ==
[1,0,0,0])
self.assertEqual(bList[0].GetBondTypeAsDouble(),1.5)
self.assertEqual(bList[1].GetBondTypeAsDouble(),1.0)
self.assertEqual(bList[2].GetBondTypeAsDouble(),2.0)
self.assertTrue([x.GetIsConjugated()!=0 for x in bList] ==
[1,0,1,1],[x.GetIsConjugated()!=0 for x in bList])
self.assertTrue([x.GetBeginAtomIdx() for x in bList] ==
[0,6,7,7],[x.GetBeginAtomIdx() for x in bList])
self.assertTrue([x.GetBeginAtom().GetIdx() for x in bList] ==
[0,6,7,7])
self.assertTrue([x.GetEndAtomIdx() for x in bList] ==
[1,7,8,9])
self.assertTrue([x.GetEndAtom().GetIdx() for x in bList] ==
[1,7,8,9])
def test9Smarts(self):
query1 = Chem.MolFromSmarts('C(=O)O')
self.assertTrue(query1)
query2 = Chem.MolFromSmarts('C(=O)[O,N]')
self.assertTrue(query2)
query3 = Chem.MolFromSmarts('[$(C(=O)O)]')
self.assertTrue(query3)
mol = Chem.MolFromSmiles('CCC(=O)O')
self.assertTrue(mol)
self.assertTrue(mol.HasSubstructMatch(query1))
self.assertTrue(mol.HasSubstructMatch(query2))
self.assertTrue(mol.HasSubstructMatch(query3))
mol = Chem.MolFromSmiles('CCC(=O)N')
self.assertTrue(mol)
self.assertTrue(not mol.HasSubstructMatch(query1))
self.assertTrue(mol.HasSubstructMatch(query2))
self.assertTrue(not mol.HasSubstructMatch(query3))
def test10Iterators(self):
mol = Chem.MolFromSmiles('CCOC')
self.assertTrue(mol)
for atom in mol.GetAtoms():
self.assertTrue(atom)
ats = mol.GetAtoms()
ats[1]
with self.assertRaisesRegexp(IndexError, ""):
ats[12]
for bond in mol.GetBonds():
self.assertTrue(bond)
bonds = mol.GetBonds()
bonds[1]
with self.assertRaisesRegexp(IndexError, ""):
bonds[12]
def test11MolOps(self) :
mol = Chem.MolFromSmiles('C1=CC=C(C=C1)P(C2=CC=CC=C2)C3=CC=CC=C3')
self.assertTrue(mol)
smi = Chem.MolToSmiles(mol)
Chem.SanitizeMol(mol)
nr = Chem.GetSymmSSSR(mol)
self.assertTrue((len(nr) == 3))
def test12Smarts(self):
query1 = Chem.MolFromSmarts('C(=O)O')
self.assertTrue(query1)
query2 = Chem.MolFromSmarts('C(=O)[O,N]')
self.assertTrue(query2)
query3 = Chem.MolFromSmarts('[$(C(=O)O)]')
self.assertTrue(query3)
mol = Chem.MolFromSmiles('CCC(=O)O')
self.assertTrue(mol)
self.assertTrue(mol.HasSubstructMatch(query1))
self.assertTrue(mol.GetSubstructMatch(query1)==(2,3,4))
self.assertTrue(mol.HasSubstructMatch(query2))
self.assertTrue(mol.GetSubstructMatch(query2)==(2,3,4))
self.assertTrue(mol.HasSubstructMatch(query3))
self.assertTrue(mol.GetSubstructMatch(query3)==(2,))
mol = Chem.MolFromSmiles('CCC(=O)N')
self.assertTrue(mol)
self.assertTrue(not mol.HasSubstructMatch(query1))
self.assertTrue(not mol.GetSubstructMatch(query1))
self.assertTrue(mol.HasSubstructMatch(query2))
self.assertTrue(mol.GetSubstructMatch(query2)==(2,3,4))
self.assertTrue(not mol.HasSubstructMatch(query3))
mol = Chem.MolFromSmiles('OC(=O)CC(=O)O')
self.assertTrue(mol)
self.assertTrue(mol.HasSubstructMatch(query1))
self.assertTrue(mol.GetSubstructMatch(query1)==(1,2,0))
self.assertTrue(mol.GetSubstructMatches(query1)==((1,2,0),(4,5,6)))
self.assertTrue(mol.HasSubstructMatch(query2))
self.assertTrue(mol.GetSubstructMatch(query2)==(1,2,0))
self.assertTrue(mol.GetSubstructMatches(query2)==((1,2,0),(4,5,6)))
self.assertTrue(mol.HasSubstructMatch(query3))
self.assertTrue(mol.GetSubstructMatches(query3)==((1,),(4,)))
def test13Smarts(self):
# previous smarts problems:
query = Chem.MolFromSmarts('N(=,-C)')
self.assertTrue(query)
mol = Chem.MolFromSmiles('N#C')
self.assertTrue(not mol.HasSubstructMatch(query))
mol = Chem.MolFromSmiles('N=C')
self.assertTrue(mol.HasSubstructMatch(query))
mol = Chem.MolFromSmiles('NC')
self.assertTrue(mol.HasSubstructMatch(query))
query = Chem.MolFromSmarts('[Cl,$(O)]')
mol = Chem.MolFromSmiles('C(=O)O')
self.assertTrue(len(mol.GetSubstructMatches(query))==2)
mol = Chem.MolFromSmiles('C(=N)N')
self.assertTrue(len(mol.GetSubstructMatches(query))==0)
query = Chem.MolFromSmarts('[$([O,S]-[!$(*=O)])]')
mol = Chem.MolFromSmiles('CC(S)C(=O)O')
self.assertTrue(len(mol.GetSubstructMatches(query))==1)
mol = Chem.MolFromSmiles('C(=O)O')
self.assertTrue(len(mol.GetSubstructMatches(query))==0)
def test14Hs(self):
m = Chem.MolFromSmiles('CC(=O)[OH]')
self.assertEqual(m.GetNumAtoms(),4)
m2 = Chem.AddHs(m)
self.assertEqual(m2.GetNumAtoms(),8)
m2 = Chem.RemoveHs(m2)
self.assertEqual(m2.GetNumAtoms(),4)
m = Chem.MolFromSmiles('CC[H]',False)
self.assertEqual(m.GetNumAtoms(),3)
m2 = Chem.MergeQueryHs(m)
self.assertEqual(m2.GetNumAtoms(),2)
self.assertTrue(m2.GetAtomWithIdx(1).HasQuery())
m = Chem.MolFromSmiles('CC[H]',False)
self.assertEqual(m.GetNumAtoms(),3)
m1 = Chem.RemoveHs(m)
self.assertEqual(m1.GetNumAtoms(),2)
self.assertEqual(m1.GetAtomWithIdx(1).GetNumExplicitHs(),0)
m1 = Chem.RemoveHs(m,updateExplicitCount=True)
self.assertEqual(m1.GetNumAtoms(),2)
self.assertEqual(m1.GetAtomWithIdx(1).GetNumExplicitHs(),1)
# test merging of mapped hydrogens
m = Chem.MolFromSmiles('CC[H]',False)
m.GetAtomWithIdx(2).SetProp("molAtomMapNumber", "1")
self.assertEqual(m.GetNumAtoms(),3)
m2 = Chem.MergeQueryHs(m, mergeUnmappedOnly=True)
self.assertTrue(m2 is not None)
self.assertEqual(m2.GetNumAtoms(),3)
self.assertFalse(m2.GetAtomWithIdx(1).HasQuery())
# here the hydrogen is unmapped
# should be the same as merging all hydrogens
m = Chem.MolFromSmiles('CC[H]',False)
m.GetAtomWithIdx(1).SetProp("molAtomMapNumber", "1")
self.assertEqual(m.GetNumAtoms(),3)
m2 = Chem.MergeQueryHs(m, mergeUnmappedOnly=True)
self.assertTrue(m2 is not None)
self.assertEqual(m2.GetNumAtoms(),2)
self.assertTrue(m2.GetAtomWithIdx(1).HasQuery())
# test github758
m = Chem.MolFromSmiles('CCC')
self.assertEqual(m.GetNumAtoms(),3)
m = Chem.AddHs(m,onlyOnAtoms=(0,2))
self.assertEqual(m.GetNumAtoms(),9)
self.assertEqual(m.GetAtomWithIdx(0).GetDegree(),4)
self.assertEqual(m.GetAtomWithIdx(2).GetDegree(),4)
self.assertEqual(m.GetAtomWithIdx(1).GetDegree(),2)
def test15Neighbors(self):
m = Chem.MolFromSmiles('CC(=O)[OH]')
self.assertTrue(m.GetNumAtoms()==4)
a = m.GetAtomWithIdx(1)
ns = a.GetNeighbors()
self.assertTrue(len(ns)==3)
bs = a.GetBonds()
self.assertTrue(len(bs)==3)
for b in bs:
try:
a2 = b.GetOtherAtom(a)
except Exception:
a2=None
self.assertTrue(a2)
self.assertTrue(len(bs)==3)
def test16Pickle(self):
from rdkit.six.moves import cPickle
m = Chem.MolFromSmiles('C1=CN=CC=C1')
pkl = cPickle.dumps(m)
m2 = cPickle.loads(pkl)
smi1 = Chem.MolToSmiles(m)
smi2 = Chem.MolToSmiles(m2)
self.assertTrue(smi1==smi2)
def test16Props(self):
m = Chem.MolFromSmiles('C1=CN=CC=C1')
self.assertTrue(not m.HasProp('prop1'))
self.assertTrue(not m.HasProp('prop2'))
self.assertTrue(not m.HasProp('prop2'))
m.SetProp('prop1','foob')
self.assertTrue(not m.HasProp('prop2'))
self.assertTrue(m.HasProp('prop1'))
self.assertTrue(m.GetProp('prop1')=='foob')
self.assertTrue(not m.HasProp('propo'))
try:
m.GetProp('prop2')
except KeyError:
ok=1
else:
ok=0
self.assertTrue(ok)
# test computed properties
m.SetProp('cprop1', 'foo', 1)
m.SetProp('cprop2', 'foo2', 1)
m.ClearComputedProps()
self.assertTrue(not m.HasProp('cprop1'))
self.assertTrue(not m.HasProp('cprop2'))
m.SetDoubleProp("a", 2.0)
self.assertTrue(m.GetDoubleProp("a") == 2.0)
try:
self.assertTrue(m.GetIntProp("a") == 2.0)
raise Exception("Expected runtime exception")
except ValueError:
pass
try:
self.assertTrue(m.GetUnsignedProp("a") == 2.0)
raise Exception("Expected runtime exception")
except ValueError:
pass
m.SetDoubleProp("a", -2)
self.assertTrue(m.GetDoubleProp("a") == -2.0)
m.SetIntProp("a", -2)
self.assertTrue(m.GetIntProp("a") == -2)
try:
m.SetUnsignedProp("a", -2)
raise Exception("Expected failure with negative unsigned number")
except OverflowError:
pass
m.SetBoolProp("a", False)
self.assertTrue(m.GetBoolProp("a") == False)
self.assertEquals(m.GetPropsAsDict(), {'a': False, 'prop1': 'foob'})
m.SetDoubleProp("b", 1000.0)
m.SetUnsignedProp("c", 2000)
m.SetIntProp("d", -2)
m.SetUnsignedProp("e", 2, True)
self.assertEquals(m.GetPropsAsDict(False, True),
{'a': False, 'c': 2000, 'b': 1000.0, 'e': 2,
'd': -2, 'prop1': 'foob'})
def test17Kekulize(self):
m = Chem.MolFromSmiles('c1ccccc1')
smi = Chem.MolToSmiles(m)
self.assertTrue(smi=='c1ccccc1')
Chem.Kekulize(m)
smi = Chem.MolToSmiles(m)
self.assertTrue(smi=='c1ccccc1')
m = Chem.MolFromSmiles('c1ccccc1')
smi = Chem.MolToSmiles(m)
self.assertTrue(smi=='c1ccccc1')
Chem.Kekulize(m,1)
smi = Chem.MolToSmiles(m)
self.assertTrue(smi=='C1=CC=CC=C1', smi)
def test18Paths(self):
m = Chem.MolFromSmiles("C1CC2C1CC2")
#self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,1,useBonds=1))==7)
#print(Chem.FindAllPathsOfLengthN(m,3,useBonds=0))
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,2,useBonds=1))==10,
Chem.FindAllPathsOfLengthN(m,2,useBonds=1))
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,3,useBonds=1))==14)
m = Chem.MolFromSmiles('C1CC1C')
self.assertTrue(m)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,1,useBonds=1))==4)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,2,useBonds=1))==5)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,3,useBonds=1))==3,Chem.FindAllPathsOfLengthN(m,3,useBonds=1))
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,4,useBonds=1))==1,Chem.FindAllPathsOfLengthN(m,4,useBonds=1))
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,5,useBonds=1))==0,Chem.FindAllPathsOfLengthN(m,5,useBonds=1))
#
# Hexane example from Hall-Kier Rev.Comp.Chem. paper
# Rev. Comp. Chem. vol 2, 367-422, (1991)
#
m = Chem.MolFromSmiles("CCCCCC")
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,1,useBonds=1))==5)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,2,useBonds=1))==4)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,3,useBonds=1))==3)
m = Chem.MolFromSmiles("CCC(C)CC")
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,1,useBonds=1))==5)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,2,useBonds=1))==5)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,3,useBonds=1))==4,Chem.FindAllPathsOfLengthN(m,3,useBonds=1))
m = Chem.MolFromSmiles("CCCC(C)C")
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,1,useBonds=1))==5)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,2,useBonds=1))==5)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,3,useBonds=1))==3)
m = Chem.MolFromSmiles("CC(C)C(C)C")
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,1,useBonds=1))==5)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,2,useBonds=1))==6)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,3,useBonds=1))==4)
m = Chem.MolFromSmiles("CC(C)(C)CC")
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,1,useBonds=1))==5)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,2,useBonds=1))==7)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,3,useBonds=1))==3,Chem.FindAllPathsOfLengthN(m,3,useBonds=1))
m = Chem.MolFromSmiles("C1CCCCC1")
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,1,useBonds=1))==6)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,2,useBonds=1))==6)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,3,useBonds=1))==6)
m = Chem.MolFromSmiles("C1CC2C1CC2")
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,1,useBonds=1))==7)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,2,useBonds=1))==10,
Chem.FindAllPathsOfLengthN(m,2,useBonds=1))
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,3,useBonds=1))==14)
m = Chem.MolFromSmiles("CC2C1CCC12")
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,1,useBonds=1))==7)
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,2,useBonds=1))==11)
# FIX: this result disagrees with the paper (which says 13),
# but it seems right
self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,3,useBonds=1))==15,
Chem.FindAllPathsOfLengthN(m,3,useBonds=1))
def test19Subgraphs(self):
m = Chem.MolFromSmiles('C1CC1C')
self.assertTrue(m)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,1,0))==4)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,2))==5)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,3))==4)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,4))==1)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,5))==0)
#
# Hexane example from Hall-Kier Rev.Comp.Chem. paper
# Rev. Comp. Chem. vol 2, 367-422, (1991)
#
m = Chem.MolFromSmiles("CCCCCC")
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,1))==5)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,2))==4)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,3))==3)
l = Chem.FindAllSubgraphsOfLengthMToN(m,1,3)
self.assertEqual(len(l),3)
self.assertEqual(len(l[0]),5)
self.assertEqual(len(l[1]),4)
self.assertEqual(len(l[2]),3)
self.assertRaises(ValueError,lambda :Chem.FindAllSubgraphsOfLengthMToN(m,4,3))
m = Chem.MolFromSmiles("CCC(C)CC")
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,1))==5)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,2))==5)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,3))==5)
m = Chem.MolFromSmiles("CCCC(C)C")
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,1))==5)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,2))==5)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,3))==4)
m = Chem.MolFromSmiles("CC(C)C(C)C")
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,1))==5)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,2))==6)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,3))==6)
m = Chem.MolFromSmiles("CC(C)(C)CC")
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,1))==5)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,2))==7)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,3))==7,Chem.FindAllSubgraphsOfLengthN(m,3))
m = Chem.MolFromSmiles("C1CCCCC1")
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,1))==6)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,2))==6)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,3))==6)
#self.assertTrue(len(Chem.FindUniqueSubgraphsOfLengthN(m,1))==1)
self.assertTrue(len(Chem.FindUniqueSubgraphsOfLengthN(m,2))==1)
self.assertTrue(len(Chem.FindUniqueSubgraphsOfLengthN(m,3))==1)
m = Chem.MolFromSmiles("C1CC2C1CC2")
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,1))==7)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,2))==10)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,3))==16)
m = Chem.MolFromSmiles("CC2C1CCC12")
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,1))==7)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,2))==11)
self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m,3))==18,
len(Chem.FindAllSubgraphsOfLengthN(m,3)))
def test20IsInRing(self):
m = Chem.MolFromSmiles('C1CCC1C')
self.assertTrue(m)
self.assertTrue(m.GetAtomWithIdx(0).IsInRingSize(4))
self.assertTrue(m.GetAtomWithIdx(1).IsInRingSize(4))
self.assertTrue(m.GetAtomWithIdx(2).IsInRingSize(4))
self.assertTrue(m.GetAtomWithIdx(3).IsInRingSize(4))
self.assertTrue(not m.GetAtomWithIdx(4).IsInRingSize(4))
self.assertTrue(not m.GetAtomWithIdx(0).IsInRingSize(3))
self.assertTrue(not m.GetAtomWithIdx(1).IsInRingSize(3))
self.assertTrue(not m.GetAtomWithIdx(2).IsInRingSize(3))
self.assertTrue(not m.GetAtomWithIdx(3).IsInRingSize(3))
self.assertTrue(not m.GetAtomWithIdx(4).IsInRingSize(3))
self.assertTrue(m.GetBondWithIdx(0).IsInRingSize(4))
self.assertTrue(not m.GetBondWithIdx(3).IsInRingSize(4))
self.assertTrue(not m.GetBondWithIdx(0).IsInRingSize(3))
self.assertTrue(not m.GetBondWithIdx(3).IsInRingSize(3))
def test21Robustification(self):
ok = False
# FIX: at the moment I can't figure out how to catch the
    # actual exception that BPL is throwing when it gets
# invalid arguments (Boost.Python.ArgumentError)
try:
Chem.MolFromSmiles('C=O').HasSubstructMatch(Chem.MolFromSmarts('fiib'))
#except ValueError:
# ok=True
except Exception:
ok=True
self.assertTrue(ok )
def test22DeleteSubstruct(self) :
query = Chem.MolFromSmarts('C(=O)O')
mol = Chem.MolFromSmiles('CCC(=O)O')
nmol = Chem.DeleteSubstructs(mol, query)
self.assertTrue(Chem.MolToSmiles(nmol) == 'CC')
mol = Chem.MolFromSmiles('CCC(=O)O.O=CO')
# now delete only fragments
nmol = Chem.DeleteSubstructs(mol, query, 1)
self.assertTrue(Chem.MolToSmiles(nmol) == 'CCC(=O)O',Chem.MolToSmiles(nmol))
mol = Chem.MolFromSmiles('CCC(=O)O.O=CO')
nmol = Chem.DeleteSubstructs(mol, query, 0)
self.assertTrue(Chem.MolToSmiles(nmol) == 'CC')
mol = Chem.MolFromSmiles('CCCO')
nmol = Chem.DeleteSubstructs(mol, query, 0)
self.assertTrue(Chem.MolToSmiles(nmol) == 'CCCO')
# Issue 96 prevented this from working:
mol = Chem.MolFromSmiles('CCC(=O)O.O=CO')
nmol = Chem.DeleteSubstructs(mol, query, 1)
self.assertTrue(Chem.MolToSmiles(nmol) == 'CCC(=O)O')
nmol = Chem.DeleteSubstructs(nmol, query, 1)
self.assertTrue(Chem.MolToSmiles(nmol) == 'CCC(=O)O')
nmol = Chem.DeleteSubstructs(nmol, query, 0)
self.assertTrue(Chem.MolToSmiles(nmol) == 'CC')
def test23MolFileParsing(self) :
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','triazine.mol')
#fileN = "../FileParsers/test_data/triazine.mol"
with open(fileN,'r') as inF:
inD = inF.read()
m1 = Chem.MolFromMolBlock(inD)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==9)
m1 = Chem.MolFromMolFile(fileN)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==9)
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','triazine.mof')
self.assertRaises(IOError,lambda :Chem.MolFromMolFile(fileN))
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','list-query.mol')
query = Chem.MolFromMolFile(fileN)
smi = Chem.MolToSmiles(query)
self.assertEqual(smi,'c1ccccc1')
smi = Chem.MolToSmarts(query)
self.assertEqual(smi,'[#6]1:[#6]:[#6]:[#6]:[#6]:[#6,#7,#15]:1',smi)
query = Chem.MolFromMolFile(fileN,sanitize=False)
smi = Chem.MolToSmiles(query)
self.assertEqual(smi,'C1=CC=CC=C1')
query.UpdatePropertyCache()
smi = Chem.MolToSmarts(query)
self.assertEqual(smi,'[#6]1=[#6]-[#6]=[#6]-[#6]=[#6,#7,#15]-1')
smi = "C1=CC=CC=C1"
mol = Chem.MolFromSmiles(smi,0)
self.assertTrue(mol.HasSubstructMatch(query))
Chem.SanitizeMol(mol)
self.assertTrue(not mol.HasSubstructMatch(query))
mol = Chem.MolFromSmiles('N1=CC=CC=C1',0)
self.assertTrue(mol.HasSubstructMatch(query))
mol = Chem.MolFromSmiles('S1=CC=CC=C1',0)
self.assertTrue(not mol.HasSubstructMatch(query))
mol = Chem.MolFromSmiles('P1=CC=CC=C1',0)
self.assertTrue(mol.HasSubstructMatch(query))
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','issue123.mol')
mol = Chem.MolFromMolFile(fileN)
self.assertTrue(mol)
self.assertEqual(mol.GetNumAtoms(),23)
mol = Chem.MolFromMolFile(fileN,removeHs=False)
self.assertTrue(mol)
self.assertEqual(mol.GetNumAtoms(),39)
# test23 was for Chem.DaylightFingerprint, which is deprecated
def test24RDKFingerprint(self):
from rdkit import DataStructs
m1 = Chem.MolFromSmiles('C1=CC=CC=C1')
fp1 = Chem.RDKFingerprint(m1)
self.assertTrue(len(fp1)==2048)
m2 = Chem.MolFromSmiles('C1=CC=CC=C1')
fp2 = Chem.RDKFingerprint(m2)
tmp = DataStructs.TanimotoSimilarity(fp1,fp2)
self.assertTrue(tmp==1.0,tmp)
m2 = Chem.MolFromSmiles('C1=CC=CC=N1')
fp2 = Chem.RDKFingerprint(m2)
self.assertTrue(len(fp2)==2048)
tmp = DataStructs.TanimotoSimilarity(fp1,fp2)
self.assertTrue(tmp<1.0,tmp)
self.assertTrue(tmp>0.0,tmp)
fp3 = Chem.RDKFingerprint(m1,tgtDensity=0.3)
self.assertTrue(len(fp3)<2048)
m1 = Chem.MolFromSmiles('C1=CC=CC=C1')
fp1 = Chem.RDKFingerprint(m1)
m2 = Chem.MolFromSmiles('C1=CC=CC=N1')
fp2 = Chem.RDKFingerprint(m2)
self.assertNotEqual(fp1,fp2)
atomInvariants=[1]*6
fp1 = Chem.RDKFingerprint(m1,atomInvariants=atomInvariants)
fp2 = Chem.RDKFingerprint(m2,atomInvariants=atomInvariants)
self.assertEqual(fp1,fp2)
m2 = Chem.MolFromSmiles('C1CCCCN1')
fp1 = Chem.RDKFingerprint(m1,atomInvariants=atomInvariants,useBondOrder=False)
fp2 = Chem.RDKFingerprint(m2,atomInvariants=atomInvariants,useBondOrder=False)
self.assertEqual(fp1,fp2)
# rooted at atom
m1 = Chem.MolFromSmiles('CCCCCO')
fp1 = Chem.RDKFingerprint(m1,1,4,nBitsPerHash=1,fromAtoms=[0])
self.assertEqual(fp1.GetNumOnBits(),4)
m1 = Chem.MolFromSmiles('CCCCCO')
fp1 = Chem.RDKFingerprint(m1,1,4,nBitsPerHash=1,fromAtoms=[0,5])
self.assertEqual(fp1.GetNumOnBits(),8)
# test sf.net issue 270:
fp1 = Chem.RDKFingerprint(m1,atomInvariants=[x.GetAtomicNum()+10 for x in m1.GetAtoms()])
# atomBits
m1 = Chem.MolFromSmiles('CCCO')
l=[]
fp1 = Chem.RDKFingerprint(m1,minPath=1,maxPath=2,nBitsPerHash=1,atomBits=l)
self.assertEqual(fp1.GetNumOnBits(),4)
self.assertEqual(len(l),m1.GetNumAtoms())
self.assertEqual(len(l[0]),2)
self.assertEqual(len(l[1]),3)
self.assertEqual(len(l[2]),4)
self.assertEqual(len(l[3]),2)
def test25SDMolSupplier(self) :
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','NCI_aids_few.sdf')
#fileN = "../FileParsers/test_data/NCI_aids_few.sdf"
sdSup = Chem.SDMolSupplier(fileN)
molNames = ["48", "78", "128", "163", "164", "170", "180", "186",
"192", "203", "210", "211", "213", "220", "229", "256"]
chgs192 = {8:1, 11:1, 15:-1, 18:-1, 20:1, 21:1, 23:-1, 25:-1}
i = 0
for mol in sdSup :
self.assertTrue(mol)
self.assertTrue(mol.GetProp("_Name") == molNames[i])
i += 1
if (mol.GetProp("_Name") == "192") :
# test parsed charges on one of the molecules
for id in chgs192.keys() :
self.assertTrue(mol.GetAtomWithIdx(id).GetFormalCharge() == chgs192[id])
self.assertRaises(StopIteration,lambda:six.next(sdSup))
sdSup.reset()
ns = [mol.GetProp("_Name") for mol in sdSup]
self.assertTrue(ns == molNames)
sdSup = Chem.SDMolSupplier(fileN, 0)
for mol in sdSup :
self.assertTrue(not mol.HasProp("numArom"))
sdSup = Chem.SDMolSupplier(fileN)
self.assertTrue(len(sdSup) == 16)
mol = sdSup[5]
self.assertTrue(mol.GetProp("_Name") == "170")
# test handling of H removal:
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','withHs.sdf')
sdSup = Chem.SDMolSupplier(fileN)
m = six.next(sdSup)
self.assertTrue(m)
self.assertTrue(m.GetNumAtoms()==23)
m = six.next(sdSup)
self.assertTrue(m)
self.assertTrue(m.GetNumAtoms()==28)
sdSup = Chem.SDMolSupplier(fileN,removeHs=False)
m = six.next(sdSup)
self.assertTrue(m)
self.assertTrue(m.GetNumAtoms()==39)
m = six.next(sdSup)
self.assertTrue(m)
self.assertTrue(m.GetNumAtoms()==30)
with open(fileN,'rb') as dFile:
d = dFile.read()
sdSup.SetData(d)
m = six.next(sdSup)
self.assertTrue(m)
self.assertTrue(m.GetNumAtoms()==23)
m = six.next(sdSup)
self.assertTrue(m)
self.assertTrue(m.GetNumAtoms()==28)
sdSup.SetData(d,removeHs=False)
m = six.next(sdSup)
self.assertTrue(m)
self.assertTrue(m.GetNumAtoms()==39)
m = six.next(sdSup)
self.assertTrue(m)
self.assertTrue(m.GetNumAtoms()==30)
# test strictParsing1:
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','strictLax1.sdf')
#strict from file
sdSup = Chem.SDMolSupplier(fileN, strictParsing = True);
i = 0
for mol in sdSup:
self.assertTrue(mol.HasProp("_Name"))
if (i == 0):
self.assertTrue(not mol.HasProp("ID"))
self.assertTrue(not mol.HasProp("ANOTHER_PROPERTY"))
i += 1
self.assertTrue(i == 2)
#lax from file
sdSup = Chem.SDMolSupplier(fileN, strictParsing = False);
i = 0
for mol in sdSup:
self.assertTrue(mol.HasProp("_Name"))
self.assertTrue(mol.HasProp("ID"))
self.assertTrue(mol.HasProp("ANOTHER_PROPERTY"))
i += 1
self.assertTrue(i == 2)
#strict from text
with open(fileN,'rb') as dFile:
d = dFile.read()
sdSup = Chem.SDMolSupplier();
sdSup.SetData(d, strictParsing = True)
i = 0
for mol in sdSup:
self.assertTrue(mol.HasProp("_Name"))
if (i == 0):
self.assertTrue(not mol.HasProp("ID"))
self.assertTrue(not mol.HasProp("ANOTHER_PROPERTY"))
i += 1
self.assertTrue(i == 2)
#lax from text
sdSup = Chem.SDMolSupplier();
sdSup.SetData(d, strictParsing = False)
i = 0
for mol in sdSup:
self.assertTrue(mol.HasProp("_Name"))
self.assertTrue(mol.HasProp("ID"))
self.assertTrue(mol.HasProp("ANOTHER_PROPERTY"))
i += 1
self.assertTrue(i == 2)
# test strictParsing2:
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','strictLax2.sdf')
#strict from file
sdSup = Chem.SDMolSupplier(fileN, strictParsing = True);
i = 0
for mol in sdSup:
self.assertTrue(mol.HasProp("_Name"))
self.assertTrue(mol.HasProp("ID"))
self.assertTrue(mol.GetProp("ID") == "Lig1")
self.assertTrue(mol.HasProp("ANOTHER_PROPERTY"))
self.assertTrue(mol.GetProp("ANOTHER_PROPERTY") == \
"No blank line before dollars\n" \
"$$$$\n" \
"Structure1\n" \
"csChFnd70/05230312262D")
i += 1
self.assertTrue(i == 1)
#lax from file
sdSup = Chem.SDMolSupplier(fileN, strictParsing = False);
i = 0
for mol in sdSup:
self.assertTrue(mol.HasProp("_Name"))
self.assertTrue(mol.HasProp("ID"))
self.assertTrue(mol.GetProp("ID") == "Lig2")
self.assertTrue(mol.HasProp("ANOTHER_PROPERTY"))
self.assertTrue(mol.GetProp("ANOTHER_PROPERTY") == "Value2")
i += 1
self.assertTrue(i == 1)
#strict from text
with open(fileN,'rb') as dFile:
d = dFile.read()
sdSup = Chem.SDMolSupplier();
sdSup.SetData(d, strictParsing = True)
i = 0
for mol in sdSup:
self.assertTrue(mol.HasProp("_Name"))
self.assertTrue(mol.HasProp("ID"))
self.assertTrue(mol.GetProp("ID") == "Lig1")
self.assertTrue(mol.HasProp("ANOTHER_PROPERTY"))
self.assertTrue(mol.GetProp("ANOTHER_PROPERTY") == \
"No blank line before dollars\n" \
"$$$$\n" \
"Structure1\n" \
"csChFnd70/05230312262D")
i += 1
self.assertTrue(i == 1)
#lax from text
sdSup = Chem.SDMolSupplier();
sdSup.SetData(d, strictParsing = False)
i = 0
for mol in sdSup:
self.assertTrue(mol.HasProp("_Name"))
self.assertTrue(mol.HasProp("ID"))
self.assertTrue(mol.GetProp("ID") == "Lig2")
self.assertTrue(mol.HasProp("ANOTHER_PROPERTY"))
self.assertTrue(mol.GetProp("ANOTHER_PROPERTY") == "Value2")
i += 1
self.assertTrue(i == 1)
def test26SmiMolSupplier(self) :
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','first_200.tpsa.csv')
#fileN = "../FileParsers/test_data/first_200.tpsa.csv"
smiSup = Chem.SmilesMolSupplier(fileN, ",", 0, -1)
mol = smiSup[16];
self.assertTrue(mol.GetProp("TPSA") == "46.25")
mol = smiSup[8];
self.assertTrue(mol.GetProp("TPSA") == "65.18")
self.assertTrue(len(smiSup) == 200)
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','fewSmi.csv')
#fileN = "../FileParsers/test_data/fewSmi.csv"
smiSup = Chem.SmilesMolSupplier(fileN, delimiter=",",
smilesColumn=1, nameColumn=0,
titleLine=0)
names = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
i = 0
for mol in smiSup:
self.assertTrue(mol.GetProp("_Name") == names[i])
i += 1
mol = smiSup[3]
self.assertTrue(mol.GetProp("_Name") == "4")
self.assertTrue(mol.GetProp("Column_2") == "82.78")
# and test doing a supplier from text:
with open(fileN,'r') as inF:
inD = inF.read()
smiSup.SetData(inD, delimiter=",",
smilesColumn=1, nameColumn=0,
titleLine=0)
names = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
i = 0
# iteration interface:
for mol in smiSup:
self.assertTrue(mol.GetProp("_Name") == names[i])
i += 1
self.assertTrue(i==10)
# random access:
mol = smiSup[3]
self.assertTrue(len(smiSup)==10)
self.assertTrue(mol.GetProp("_Name") == "4")
self.assertTrue(mol.GetProp("Column_2") == "82.78")
# issue 113:
smiSup.SetData(inD, delimiter=",",
smilesColumn=1, nameColumn=0,
titleLine=0)
self.assertTrue(len(smiSup)==10)
# and test failure handling:
inD = """mol-1,CCC
mol-2,CCCC
mol-3,fail
mol-4,CCOC
"""
smiSup.SetData(inD, delimiter=",",
smilesColumn=1, nameColumn=0,
titleLine=0)
# there are 4 entries in the supplier:
self.assertTrue(len(smiSup)==4)
# but the 3rd is a None:
self.assertTrue(smiSup[2] is None)
text="Id SMILES Column_2\n"+\
"mol-1 C 1.0\n"+\
"mol-2 CC 4.0\n"+\
"mol-4 CCCC 16.0"
smiSup.SetData(text, delimiter=" ",
smilesColumn=1, nameColumn=0,
titleLine=1)
self.assertTrue(len(smiSup)==3)
self.assertTrue(smiSup[0])
self.assertTrue(smiSup[1])
self.assertTrue(smiSup[2])
m = [x for x in smiSup]
self.assertTrue(smiSup[2])
self.assertTrue(len(m)==3)
self.assertTrue(m[0].GetProp("Column_2")=="1.0")
# test simple parsing and Issue 114:
smis = ['CC','CCC','CCOC','CCCOCC','CCCOCCC']
inD = '\n'.join(smis)
smiSup.SetData(inD, delimiter=",",
smilesColumn=0, nameColumn=-1,
titleLine=0)
self.assertTrue(len(smiSup)==5)
m = [x for x in smiSup]
self.assertTrue(smiSup[4])
self.assertTrue(len(m)==5)
    # order dependence:
smiSup.SetData(inD, delimiter=",",
smilesColumn=0, nameColumn=-1,
titleLine=0)
self.assertTrue(smiSup[4])
self.assertTrue(len(smiSup)==5)
# this was a nasty BC:
# asking for a particular entry with a higher index than what we've
# already seen resulted in a duplicate:
smis = ['CC','CCC','CCOC','CCCCOC']
inD = '\n'.join(smis)
smiSup.SetData(inD, delimiter=",",
smilesColumn=0, nameColumn=-1,
titleLine=0)
m = six.next(smiSup)
m = smiSup[3]
self.assertTrue(len(smiSup)==4)
with self.assertRaisesRegexp(Exception, ""):
smiSup[4]
smiSup.SetData(inD, delimiter=",",
smilesColumn=0, nameColumn=-1,
titleLine=0)
with self.assertRaisesRegexp(Exception, ""):
smiSup[4]
sys.stderr.write('>>> This may result in an infinite loop. It should finish almost instantly\n')
self.assertEqual(len(smiSup), 4)
sys.stderr.write('<<< OK, it finished.\n')
def test27SmilesWriter(self) :
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','fewSmi.csv')
#fileN = "../FileParsers/test_data/fewSmi.csv"
smiSup = Chem.SmilesMolSupplier(fileN, delimiter=",",
smilesColumn=1, nameColumn=0,
titleLine=0)
propNames = []
propNames.append("Column_2")
ofile = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','Wrap','test_data','outSmiles.txt')
writer = Chem.SmilesWriter(ofile)
writer.SetProps(propNames)
for mol in smiSup :
writer.write(mol)
writer.flush()
def test28SmilesReverse(self):
names = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
props = ["34.14","25.78","106.51","82.78","60.16","87.74","37.38","77.28","65.18","0.00"]
ofile = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','Wrap','test_data','outSmiles.txt')
#ofile = "test_data/outSmiles.csv"
smiSup = Chem.SmilesMolSupplier(ofile)
i = 0
for mol in smiSup:
#print([repr(x) for x in mol.GetPropNames()])
self.assertTrue(mol.GetProp("_Name") == names[i])
self.assertTrue(mol.GetProp("Column_2") == props[i])
i += 1
def writerSDFile(self) :
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','NCI_aids_few.sdf')
#fileN = "../FileParsers/test_data/NCI_aids_few.sdf"
ofile = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','Wrap','test_data','outNCI_few.sdf');
writer = Chem.SDWriter(ofile);
sdSup = Chem.SDMolSupplier(fileN)
for mol in sdSup :
writer.write(mol)
writer.flush()
def test29SDWriterLoop(self) :
self.writerSDFile()
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','Wrap',
'test_data','outNCI_few.sdf')
sdSup = Chem.SDMolSupplier(fileN)
molNames = ["48", "78", "128", "163", "164", "170", "180", "186",
"192", "203", "210", "211", "213", "220", "229", "256"]
chgs192 = {8:1, 11:1, 15:-1, 18:-1, 20:1, 21:1, 23:-1, 25:-1}
i = 0
for mol in sdSup :
#print('mol:',mol)
#print('\t',molNames[i])
self.assertTrue(mol.GetProp("_Name") == molNames[i])
i += 1
if (mol.GetProp("_Name") == "192") :
# test parsed charges on one of the molecules
for id in chgs192.keys() :
self.assertTrue(mol.GetAtomWithIdx(id).GetFormalCharge() == chgs192[id])
def test30Issues109and110(self) :
""" issues 110 and 109 were both related to handling of explicit Hs in
SMILES input.
"""
m1 = Chem.MolFromSmiles('N12[CH](SC(C)(C)[CH]1C(O)=O)[CH](C2=O)NC(=O)[CH](N)c3ccccc3')
self.assertTrue(m1.GetNumAtoms()==24)
m2 = Chem.MolFromSmiles('C1C=C([CH](N)C(=O)N[C]2([H])[C]3([H])SC(C)(C)[CH](C(=O)O)N3C(=O)2)C=CC=1')
self.assertTrue(m2.GetNumAtoms()==24)
smi1 = Chem.MolToSmiles(m1)
smi2 = Chem.MolToSmiles(m2)
self.assertTrue(smi1==smi2)
m1 = Chem.MolFromSmiles('[H]CCl')
self.assertTrue(m1.GetNumAtoms()==2)
self.assertTrue(m1.GetAtomWithIdx(0).GetNumExplicitHs()==1)
m1 = Chem.MolFromSmiles('[H][CH2]Cl')
self.assertTrue(m1.GetNumAtoms()==2)
self.assertTrue(m1.GetAtomWithIdx(0).GetNumExplicitHs()==3)
m2 = Chem.AddHs(m1)
self.assertTrue(m2.GetNumAtoms()==5)
m2 = Chem.RemoveHs(m2)
self.assertTrue(m2.GetNumAtoms()==2)
def test31ChiralitySmiles(self) :
m1 = Chem.MolFromSmiles('F[C@](Br)(I)Cl')
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==5)
self.assertTrue(Chem.MolToSmiles(m1,1)=='F[C@](Cl)(Br)I',Chem.MolToSmiles(m1,1))
m1 = Chem.MolFromSmiles('CC1C[C@@]1(Cl)F')
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==6)
self.assertTrue(Chem.MolToSmiles(m1,1)=='CC1C[C@]1(F)Cl',Chem.MolToSmiles(m1,1))
m1 = Chem.MolFromSmiles('CC1C[C@]1(Cl)F')
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==6)
self.assertTrue(Chem.MolToSmiles(m1,1)=='CC1C[C@@]1(F)Cl',Chem.MolToSmiles(m1,1))
def test31aChiralitySubstructs(self) :
m1 = Chem.MolFromSmiles('CC1C[C@@]1(Cl)F')
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==6)
self.assertTrue(Chem.MolToSmiles(m1,1)=='CC1C[C@]1(F)Cl',Chem.MolToSmiles(m1,1))
m2 = Chem.MolFromSmiles('CC1C[C@]1(Cl)F')
self.assertTrue(m2 is not None)
self.assertTrue(m2.GetNumAtoms()==6)
self.assertTrue(Chem.MolToSmiles(m2,1)=='CC1C[C@@]1(F)Cl',Chem.MolToSmiles(m2,1))
self.assertTrue(m1.HasSubstructMatch(m1))
self.assertTrue(m1.HasSubstructMatch(m2))
self.assertTrue(m1.HasSubstructMatch(m1,useChirality=True))
self.assertTrue(not m1.HasSubstructMatch(m2,useChirality=True))
def _test32MolFilesWithChirality(self) :
inD = """chiral1.mol
ChemDraw10160313232D
5 4 0 0 0 0 0 0 0 0999 V2000
0.0553 0.6188 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0
0.0553 -0.2062 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7697 -0.6188 0.0000 I 0 0 0 0 0 0 0 0 0 0 0 0
-0.6592 -0.6188 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-0.7697 -0.2062 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 1 0
2 4 1 1
2 5 1 0
M END
"""
m1 = Chem.MolFromMolBlock(inD)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==5)
    smi = Chem.MolToSmiles(m1,1)
    self.assertTrue(smi=='F[C@](Cl)(Br)I',smi)
inD = """chiral2.cdxml
ChemDraw10160314052D
5 4 0 0 0 0 0 0 0 0999 V2000
0.0553 0.6188 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0
0.0553 -0.2062 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7697 -0.6188 0.0000 I 0 0 0 0 0 0 0 0 0 0 0 0
-0.6592 -0.6188 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-0.7697 -0.2062 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 1 0
2 4 1 6
2 5 1 0
M END
"""
m1 = Chem.MolFromMolBlock(inD)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==5)
self.assertTrue(Chem.MolToSmiles(m1,1)=='F[C@@](Cl)(Br)I')
inD = """chiral1.mol
ChemDraw10160313232D
5 4 0 0 0 0 0 0 0 0999 V2000
0.0553 0.6188 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0
0.0553 -0.2062 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7697 -0.2062 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0
-0.6592 -0.6188 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
0.7697 -0.6188 0.0000 I 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 1 0
2 4 1 1
2 5 1 0
M END
"""
m1 = Chem.MolFromMolBlock(inD)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==5)
self.assertTrue(Chem.MolToSmiles(m1,1)=='F[C@](Cl)(Br)I')
inD = """chiral1.mol
ChemDraw10160313232D
5 4 0 0 0 0 0 0 0 0999 V2000
0.0553 -0.2062 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7697 -0.2062 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0
-0.6592 -0.6188 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
0.7697 -0.6188 0.0000 I 0 0 0 0 0 0 0 0 0 0 0 0
0.0553 0.6188 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
1 3 1 1
1 4 1 0
1 5 1 0
M END
"""
m1 = Chem.MolFromMolBlock(inD)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==5)
self.assertTrue(Chem.MolToSmiles(m1,1)=='F[C@](Cl)(Br)I')
inD = """chiral3.mol
ChemDraw10160314362D
4 3 0 0 0 0 0 0 0 0999 V2000
0.4125 0.6188 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0
0.4125 -0.2062 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.3020 -0.6188 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-0.4125 -0.2062 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 1 1
2 4 1 0
M END
"""
m1 = Chem.MolFromMolBlock(inD)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==4)
self.assertTrue(Chem.MolToSmiles(m1,1)=='F[C@H](Cl)Br')
inD = """chiral4.mol
ChemDraw10160314362D
4 3 0 0 0 0 0 0 0 0999 V2000
0.4125 0.6188 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0
0.4125 -0.2062 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
-0.3020 -0.6188 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-0.4125 -0.2062 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 1 1
2 4 1 0
M END
"""
m1 = Chem.MolFromMolBlock(inD)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==4)
self.assertTrue(Chem.MolToSmiles(m1,1)=='FN(Cl)Br')
inD = """chiral5.mol
ChemDraw10160314362D
4 3 0 0 0 0 0 0 0 0999 V2000
0.4125 0.6188 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0
0.4125 -0.2062 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
-0.3020 -0.6188 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-0.4125 -0.2062 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 1 1
2 4 1 0
M CHG 1 2 1
M END
"""
m1 = Chem.MolFromMolBlock(inD)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==4)
self.assertTrue(Chem.MolToSmiles(m1,1)=='F[N@H+](Cl)Br')
inD="""Case 10-14-3
ChemDraw10140308512D
4 3 0 0 0 0 0 0 0 0999 V2000
-0.8250 -0.4125 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 -0.4125 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.8250 -0.4125 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.4125 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 1 0
2 4 1 1
M END
"""
m1 = Chem.MolFromMolBlock(inD)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==4)
self.assertTrue(Chem.MolToSmiles(m1,1)=='F[C@H](Cl)Br')
inD="""Case 10-14-4
ChemDraw10140308512D
4 3 0 0 0 0 0 0 0 0999 V2000
-0.8250 -0.4125 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 -0.4125 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.8250 -0.4125 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.4125 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 1 1
2 4 1 0
M END
"""
m1 = Chem.MolFromMolBlock(inD)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==4)
self.assertTrue(Chem.MolToSmiles(m1,1)=='F[C@H](Cl)Br')
inD="""chiral4.mol
ChemDraw10160315172D
6 6 0 0 0 0 0 0 0 0999 V2000
-0.4422 0.1402 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.4422 -0.6848 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.2723 -0.2723 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.8547 0.8547 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.6848 0.4422 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0
0.8547 -0.8547 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 1 0
3 1 1 0
1 4 1 0
3 5 1 1
3 6 1 0
M END
"""
m1 = Chem.MolFromMolBlock(inD)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==6)
self.assertTrue(Chem.MolToSmiles(m1,1)=='CC1C[C@@]1(F)Cl',Chem.MolToSmiles(m1,1))
inD="""chiral4.mol
ChemDraw10160315172D
6 6 0 0 0 0 0 0 0 0999 V2000
-0.4422 0.1402 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.4422 -0.6848 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.2723 -0.2723 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.8547 0.8547 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.6848 0.4422 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0
0.8547 -0.8547 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 1 0
3 1 1 0
1 4 1 0
3 5 1 6
3 6 1 0
M END
"""
m1 = Chem.MolFromMolBlock(inD)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==6)
self.assertTrue(Chem.MolToSmiles(m1,1)=='CC1C[C@]1(F)Cl',Chem.MolToSmiles(m1,1))
def test33Issue65(self) :
""" issue 65 relates to handling of [H] in SMARTS
"""
m1 = Chem.MolFromSmiles('OC(O)(O)O')
m2 = Chem.MolFromSmiles('OC(O)O')
m3 = Chem.MolFromSmiles('OCO')
q1 = Chem.MolFromSmarts('OC[H]',1)
q2 = Chem.MolFromSmarts('O[C;H1]',1)
q3 = Chem.MolFromSmarts('O[C;H1][H]',1)
self.assertTrue(not m1.HasSubstructMatch(q1))
self.assertTrue(not m1.HasSubstructMatch(q2))
self.assertTrue(not m1.HasSubstructMatch(q3))
self.assertTrue(m2.HasSubstructMatch(q1))
self.assertTrue(m2.HasSubstructMatch(q2))
self.assertTrue(m2.HasSubstructMatch(q3))
self.assertTrue(m3.HasSubstructMatch(q1))
self.assertTrue(not m3.HasSubstructMatch(q2))
self.assertTrue(not m3.HasSubstructMatch(q3))
m1H = Chem.AddHs(m1)
m2H = Chem.AddHs(m2)
m3H = Chem.AddHs(m3)
q1 = Chem.MolFromSmarts('OC[H]')
q2 = Chem.MolFromSmarts('O[C;H1]')
q3 = Chem.MolFromSmarts('O[C;H1][H]')
self.assertTrue(not m1H.HasSubstructMatch(q1))
self.assertTrue(not m1H.HasSubstructMatch(q2))
self.assertTrue(not m1H.HasSubstructMatch(q3))
#m2H.Debug()
self.assertTrue(m2H.HasSubstructMatch(q1))
self.assertTrue(m2H.HasSubstructMatch(q2))
self.assertTrue(m2H.HasSubstructMatch(q3))
self.assertTrue(m3H.HasSubstructMatch(q1))
self.assertTrue(not m3H.HasSubstructMatch(q2))
self.assertTrue(not m3H.HasSubstructMatch(q3))
def test34Issue124(self) :
""" issue 124 relates to calculation of the distance matrix
"""
m = Chem.MolFromSmiles('CC=C')
d = Chem.GetDistanceMatrix(m,0)
self.assertTrue(feq(d[0,1],1.0))
self.assertTrue(feq(d[0,2],2.0))
# force an update:
d = Chem.GetDistanceMatrix(m,1,0,1)
self.assertTrue(feq(d[0,1],1.0))
self.assertTrue(feq(d[0,2],1.5))
def test35ChiralityPerception(self) :
""" Test perception of chirality and CIP encoding
"""
m = Chem.MolFromSmiles('F[C@]([C@])(Cl)Br')
Chem.AssignStereochemistry(m,1)
self.assertTrue(m.GetAtomWithIdx(1).HasProp('_CIPCode'))
self.assertFalse(m.GetAtomWithIdx(2).HasProp('_CIPCode'))
Chem.RemoveStereochemistry(m)
self.assertFalse(m.GetAtomWithIdx(1).HasProp('_CIPCode'))
m = Chem.MolFromSmiles('F[C@H](C)C')
Chem.AssignStereochemistry(m,1)
self.assertTrue(m.GetAtomWithIdx(1).GetChiralTag() == Chem.ChiralType.CHI_UNSPECIFIED)
self.assertFalse(m.GetAtomWithIdx(1).HasProp('_CIPCode'))
m = Chem.MolFromSmiles('F\\C=C/Cl')
self.assertTrue(m.GetBondWithIdx(0).GetStereo()==Chem.BondStereo.STEREONONE)
self.assertTrue(m.GetBondWithIdx(1).GetStereo()==Chem.BondStereo.STEREOZ)
atoms = m.GetBondWithIdx(1).GetStereoAtoms()
self.assertTrue(0 in atoms)
self.assertTrue(3 in atoms)
self.assertTrue(m.GetBondWithIdx(2).GetStereo()==Chem.BondStereo.STEREONONE)
Chem.RemoveStereochemistry(m)
self.assertTrue(m.GetBondWithIdx(1).GetStereo()==Chem.BondStereo.STEREONONE)
m = Chem.MolFromSmiles('F\\C=CCl')
self.assertTrue(m.GetBondWithIdx(1).GetStereo()==Chem.BondStereo.STEREONONE)
def test36SubstructMatchStr(self):
""" test the _SubstructMatchStr function """
query = Chem.MolFromSmarts('[n,p]1ccccc1')
self.assertTrue(query)
mol = Chem.MolFromSmiles('N1=CC=CC=C1')
self.assertTrue(mol.HasSubstructMatch(query))
self.assertTrue(Chem._HasSubstructMatchStr(mol.ToBinary(),query))
mol = Chem.MolFromSmiles('S1=CC=CC=C1')
self.assertTrue(not Chem._HasSubstructMatchStr(mol.ToBinary(),query))
self.assertTrue(not mol.HasSubstructMatch(query))
mol = Chem.MolFromSmiles('P1=CC=CC=C1')
self.assertTrue(mol.HasSubstructMatch(query))
self.assertTrue(Chem._HasSubstructMatchStr(mol.ToBinary(),query))
def test37SanitException(self):
mol = Chem.MolFromSmiles('CC(C)(C)(C)C',0)
self.assertTrue(mol)
self.assertRaises(ValueError,lambda:Chem.SanitizeMol(mol))
def test38TDTSuppliers(self):
data="""$SMI<Cc1nnc(N)nc1C>
CAS<17584-12-2>
|
$SMI<Cc1n[nH]c(=O)nc1N>
CAS<~>
|
$SMI<Cc1n[nH]c(=O)[nH]c1=O>
CAS<932-53-6>
|
$SMI<Cc1nnc(NN)nc1O>
CAS<~>
|"""
suppl = Chem.TDTMolSupplier()
suppl.SetData(data,"CAS")
    i = 0
for mol in suppl:
self.assertTrue(mol)
self.assertTrue(mol.GetNumAtoms())
self.assertTrue(mol.HasProp("CAS"))
self.assertTrue(mol.HasProp("_Name"))
self.assertTrue(mol.GetProp("CAS")==mol.GetProp("_Name"))
self.assertTrue(mol.GetNumConformers()==0)
i+=1
self.assertTrue(i==4)
self.assertTrue(len(suppl)==4)
def test38Issue266(self):
""" test issue 266: generation of kekulized smiles"""
mol = Chem.MolFromSmiles('c1ccccc1')
Chem.Kekulize(mol)
smi = Chem.MolToSmiles(mol)
self.assertTrue(smi=='c1ccccc1')
smi = Chem.MolToSmiles(mol,kekuleSmiles=True)
self.assertTrue(smi=='C1=CC=CC=C1')
def test39Issue273(self):
""" test issue 273: MolFileComments and MolFileInfo props ending up in SD files
"""
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','Wrap',
'test_data','outNCI_few.sdf')
suppl = Chem.SDMolSupplier(fileN)
ms = [x for x in suppl]
for m in ms:
self.assertTrue(m.HasProp('_MolFileInfo'))
self.assertTrue(m.HasProp('_MolFileComments'))
fName = tempfile.mktemp('.sdf')
w = Chem.SDWriter(fName)
w.SetProps(ms[0].GetPropNames())
for m in ms:
w.write(m)
w = None
with open(fName, 'r') as txtFile:
txt= txtFile.read()
os.unlink(fName)
self.assertTrue(txt.find('MolFileInfo')==-1)
self.assertTrue(txt.find('MolFileComments')==-1)
def test40SmilesRootedAtAtom(self):
""" test the rootAtAtom functionality
"""
smi = 'CN(C)C'
m = Chem.MolFromSmiles(smi)
self.assertTrue(Chem.MolToSmiles(m)=='CN(C)C')
self.assertTrue(Chem.MolToSmiles(m,rootedAtAtom=1)=='N(C)(C)C')
def test41SetStreamIndices(self) :
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','NCI_aids_few.sdf')
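    # scan the SD file once and record the byte offset at which each molecule
    # record starts (records are separated by '$$$$' lines)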
allIndices = []
ifs = open(fileN, 'rb')
    addIndex = True
    line = True
    pos = 0
    while line:
      if addIndex:
        pos = ifs.tell()
      line = ifs.readline().decode('utf-8')
      if line:
        if addIndex:
          allIndices.append(pos)
        addIndex = (line[:4] == '$$$$')
ifs.close()
indices = allIndices
sdSup = Chem.SDMolSupplier(fileN)
molNames = ["48", "78", "128", "163", "164", "170", "180", "186",
"192", "203", "210", "211", "213", "220", "229", "256"]
sdSup._SetStreamIndices(indices)
self.assertTrue(len(sdSup) == 16)
mol = sdSup[5]
self.assertTrue(mol.GetProp("_Name") == "170")
i = 0
for mol in sdSup :
self.assertTrue(mol)
self.assertTrue(mol.GetProp("_Name") == molNames[i])
i += 1
ns = [mol.GetProp("_Name") for mol in sdSup]
self.assertTrue(ns == molNames)
# this can also be used to skip molecules in the file:
indices = [allIndices[0], allIndices[2], allIndices[5]]
sdSup._SetStreamIndices(indices)
self.assertTrue(len(sdSup) == 3)
mol = sdSup[2]
self.assertTrue(mol.GetProp("_Name") == "170")
# or to reorder them:
indices = [allIndices[0], allIndices[5], allIndices[2]]
sdSup._SetStreamIndices(indices)
self.assertTrue(len(sdSup) == 3)
mol = sdSup[1]
self.assertTrue(mol.GetProp("_Name") == "170")
def test42LifeTheUniverseAndEverything(self) :
self.assertTrue(True)
def test43TplFileParsing(self) :
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','cmpd2.tpl')
m1 = Chem.MolFromTPLFile(fileN)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==12)
self.assertTrue(m1.GetNumConformers()==2)
m1 = Chem.MolFromTPLFile(fileN,skipFirstConf=True)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==12)
self.assertTrue(m1.GetNumConformers()==1)
with open(fileN, 'r') as blockFile:
block = blockFile.read()
m1 = Chem.MolFromTPLBlock(block)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==12)
self.assertTrue(m1.GetNumConformers()==2)
def test44TplFileWriting(self) :
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','cmpd2.tpl')
m1 = Chem.MolFromTPLFile(fileN)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==12)
self.assertTrue(m1.GetNumConformers()==2)
block = Chem.MolToTPLBlock(m1)
m1 = Chem.MolFromTPLBlock(block)
self.assertTrue(m1 is not None)
self.assertTrue(m1.GetNumAtoms()==12)
self.assertTrue(m1.GetNumConformers()==2)
def test45RingInfo(self):
""" test the RingInfo class
"""
smi = 'CNC'
m = Chem.MolFromSmiles(smi)
ri = m.GetRingInfo()
self.assertTrue(ri)
self.assertTrue(ri.NumRings()==0)
self.assertFalse(ri.IsAtomInRingOfSize(0,3))
self.assertFalse(ri.IsAtomInRingOfSize(1,3))
self.assertFalse(ri.IsAtomInRingOfSize(2,3))
self.assertFalse(ri.IsBondInRingOfSize(1,3))
self.assertFalse(ri.IsBondInRingOfSize(2,3))
smi = 'C1CC2C1C2'
m = Chem.MolFromSmiles(smi)
ri = m.GetRingInfo()
self.assertTrue(ri)
self.assertTrue(ri.NumRings()==2)
self.assertFalse(ri.IsAtomInRingOfSize(0,3))
self.assertTrue(ri.IsAtomInRingOfSize(0,4))
self.assertFalse(ri.IsBondInRingOfSize(0,3))
self.assertTrue(ri.IsBondInRingOfSize(0,4))
self.assertTrue(ri.IsAtomInRingOfSize(2,4))
self.assertTrue(ri.IsAtomInRingOfSize(2,3))
self.assertTrue(ri.IsBondInRingOfSize(2,3))
self.assertTrue(ri.IsBondInRingOfSize(2,4))
def test46ReplaceCore(self):
""" test the ReplaceCore functionality
"""
core = Chem.MolFromSmiles('C=O')
smi = 'CCC=O'
m = Chem.MolFromSmiles(smi)
r = Chem.ReplaceCore(m,core)
self.assertTrue(r)
self.assertEqual(Chem.MolToSmiles(r,True),'[1*]CC')
smi = 'C1CC(=O)CC1'
m = Chem.MolFromSmiles(smi)
r = Chem.ReplaceCore(m,core)
self.assertTrue(r)
self.assertEqual(Chem.MolToSmiles(r,True),'[1*]CCCC[2*]')
smi = 'C1CC(=N)CC1'
m = Chem.MolFromSmiles(smi)
r = Chem.ReplaceCore(m,core)
self.assertFalse(r)
# smiles, smarts, replaceDummies, labelByIndex, useChirality
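    # roughly: replaceDummies controls whether the atoms that matched the core's
    # dummy atoms are stripped along with the core, labelByIndex numbers the
    # attachment dummies by core-atom index (3 and 4 here) instead of
    # sequentially (1 and 2), and useChirality enforces a stereochemistry-aware
    # match, which for the [C@@] input swaps which sidechain gets which label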
expected = {
('C1O[C@@]1(OC)NC', 'C1O[C@]1(*)*', False, False, False) : '[1*]OC.[2*]NC' ,
('C1O[C@@]1(OC)NC', 'C1O[C@]1(*)*', False, False, True) : '[1*]NC.[2*]OC' ,
('C1O[C@@]1(OC)NC', 'C1O[C@]1(*)*', False, True, False) : '[3*]OC.[4*]NC' ,
('C1O[C@@]1(OC)NC', 'C1O[C@]1(*)*', False, True, True) : '[3*]NC.[4*]OC' ,
('C1O[C@@]1(OC)NC', 'C1O[C@]1(*)*', True, False, False) : '[1*]C.[2*]C' ,
('C1O[C@@]1(OC)NC', 'C1O[C@]1(*)*', True, False, True) : '[1*]C.[2*]C' ,
('C1O[C@@]1(OC)NC', 'C1O[C@]1(*)*', True, True, False) : '[3*]C.[4*]C' ,
('C1O[C@@]1(OC)NC', 'C1O[C@]1(*)*', True, True, True) : '[3*]C.[4*]C' ,
('C1O[C@]1(OC)NC', 'C1O[C@]1(*)*', False, False, False) : '[1*]OC.[2*]NC' ,
('C1O[C@]1(OC)NC', 'C1O[C@]1(*)*', False, False, True) : '[1*]OC.[2*]NC' ,
('C1O[C@]1(OC)NC', 'C1O[C@]1(*)*', False, True, False) : '[3*]OC.[4*]NC' ,
('C1O[C@]1(OC)NC', 'C1O[C@]1(*)*', False, True, True) : '[3*]OC.[4*]NC' ,
('C1O[C@]1(OC)NC', 'C1O[C@]1(*)*', True, False, False) : '[1*]C.[2*]C' ,
('C1O[C@]1(OC)NC', 'C1O[C@]1(*)*', True, False, True) : '[1*]C.[2*]C' ,
('C1O[C@]1(OC)NC', 'C1O[C@]1(*)*', True, True, False) : '[3*]C.[4*]C' ,
('C1O[C@]1(OC)NC', 'C1O[C@]1(*)*', True, True, True) : '[3*]C.[4*]C' ,
}
for (smiles, smarts, replaceDummies, labelByIndex, useChirality), expected_smiles in expected.items():
mol = Chem.MolFromSmiles(smiles)
core = Chem.MolFromSmarts(smarts)
nm = Chem.ReplaceCore(
mol,
core,
replaceDummies=replaceDummies,
labelByIndex=labelByIndex,
useChirality=useChirality)
if Chem.MolToSmiles(nm, True) != expected_smiles:
print("ReplaceCore(%r, %r, replaceDummies=%r, labelByIndex=%r, useChirality=%r"%(
smiles, smarts, replaceDummies, labelByIndex, useChirality), file=sys.stderr)
print("expected: %s\ngot: %s"%(expected_smiles, Chem.MolToSmiles(nm, True)), file=sys.stderr)
self.assertEquals(expected_smiles, Chem.MolToSmiles(nm, True))
matchVect = mol.GetSubstructMatch(core, useChirality=useChirality)
nm = Chem.ReplaceCore(mol, core, matchVect,
replaceDummies=replaceDummies,
labelByIndex=labelByIndex)
if Chem.MolToSmiles(nm, True) != expected_smiles:
print("ReplaceCore(%r, %r, %r, replaceDummies=%r, labelByIndex=%rr"%(
smiles, smarts, matchVect, replaceDummies, labelByIndex),
file=sys.stderr)
print("expected: %s\ngot: %s"%(expected_smiles, Chem.MolToSmiles(nm, True)), file=sys.stderr)
self.assertEquals(expected_smiles, Chem.MolToSmiles(nm, True))
    mol = Chem.MolFromSmiles("C")
    smarts = Chem.MolFromSmarts("C")
    # an out-of-range atom index in the match vector should raise
    try:
      Chem.ReplaceCore(mol, smarts, (3,))
    except Exception:
      pass
    else:
      self.fail("ReplaceCore should have raised for an out-of-range match index")
    mol = Chem.MolFromSmiles("C")
    smarts = Chem.MolFromSmarts("C")
    # a match vector that does not correspond to the core atoms should also raise
    try:
      Chem.ReplaceCore(mol, smarts, (0,0))
    except Exception:
      pass
    else:
      self.fail("ReplaceCore should have raised for a match vector that does not fit the core")
def test47RWMols(self):
""" test the RWMol class
"""
mol = Chem.MolFromSmiles('C1CCC1')
self.assertTrue(type(mol)==Chem.Mol)
for rwmol in [Chem.EditableMol(mol), Chem.RWMol(mol)]:
self.assertTrue(type(rwmol) in [Chem.EditableMol, Chem.RWMol])
newAt = Chem.Atom(8)
rwmol.ReplaceAtom(0,newAt)
self.assertTrue(Chem.MolToSmiles(rwmol.GetMol())=='C1COC1')
rwmol.RemoveBond(0,1)
self.assertTrue(Chem.MolToSmiles(rwmol.GetMol())=='CCCO')
a = Chem.Atom(7)
idx=rwmol.AddAtom(a)
self.assertEqual(rwmol.GetMol().GetNumAtoms(),5)
self.assertEqual(idx,4)
idx=rwmol.AddBond(0,4,order=Chem.BondType.SINGLE)
self.assertEqual(idx,4)
self.assertTrue(Chem.MolToSmiles(rwmol.GetMol())=='CCCON')
rwmol.AddBond(4,1,order=Chem.BondType.SINGLE)
self.assertTrue(Chem.MolToSmiles(rwmol.GetMol())=='C1CNOC1')
rwmol.RemoveAtom(3)
self.assertTrue(Chem.MolToSmiles(rwmol.GetMol())=='CCNO')
# practice shooting ourselves in the foot:
m = Chem.MolFromSmiles('c1ccccc1')
em=Chem.EditableMol(m)
em.RemoveAtom(0)
m2 = em.GetMol()
self.assertRaises(ValueError,lambda : Chem.SanitizeMol(m2))
m = Chem.MolFromSmiles('c1ccccc1')
em=Chem.EditableMol(m)
em.RemoveBond(0,1)
m2 = em.GetMol()
self.assertRaises(ValueError,lambda : Chem.SanitizeMol(m2))
# boundary cases:
# removing non-existent bonds:
m = Chem.MolFromSmiles('c1ccccc1')
em=Chem.EditableMol(m)
em.RemoveBond(0,2)
m2 = em.GetMol()
Chem.SanitizeMol(m2)
self.assertTrue(Chem.MolToSmiles(m2)=='c1ccccc1')
# removing non-existent atoms:
m = Chem.MolFromSmiles('c1ccccc1')
em=Chem.EditableMol(m)
self.assertRaises(RuntimeError,lambda:em.RemoveAtom(12))
def test47SmartsPieces(self):
""" test the GetAtomSmarts and GetBondSmarts functions
"""
m =Chem.MolFromSmarts("[C,N]C")
self.assertTrue(m.GetAtomWithIdx(0).GetSmarts()=='[C,N]')
self.assertTrue(m.GetAtomWithIdx(1).GetSmarts()=='C')
self.assertTrue(m.GetBondBetweenAtoms(0,1).GetSmarts()=='-,:')
m =Chem.MolFromSmarts("[$(C=O)]-O")
self.assertTrue(m.GetAtomWithIdx(0).GetSmarts()=='[$(C=O)]')
self.assertTrue(m.GetAtomWithIdx(1).GetSmarts()=='O')
self.assertTrue(m.GetBondBetweenAtoms(0,1).GetSmarts()=='-')
m =Chem.MolFromSmiles("CO")
self.assertTrue(m.GetAtomWithIdx(0).GetSmarts()=='C')
self.assertTrue(m.GetAtomWithIdx(1).GetSmarts()=='O')
self.assertTrue(m.GetBondBetweenAtoms(0,1).GetSmarts()=='')
self.assertTrue(m.GetBondBetweenAtoms(0,1).GetSmarts(allBondsExplicit=True)=='-')
m =Chem.MolFromSmiles("C=O")
self.assertTrue(m.GetAtomWithIdx(0).GetSmarts()=='C')
self.assertTrue(m.GetAtomWithIdx(1).GetSmarts()=='O')
self.assertTrue(m.GetBondBetweenAtoms(0,1).GetSmarts()=='=')
def test48Issue1928819(self):
""" test a crash involving looping directly over mol suppliers
"""
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','NCI_aids_few.sdf')
ms = [x for x in Chem.SDMolSupplier(fileN)]
self.assertEqual(len(ms),16)
count=0
for m in Chem.SDMolSupplier(fileN): count+=1
self.assertEqual(count,16)
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','fewSmi.csv')
count=0
for m in Chem.SmilesMolSupplier(fileN,titleLine=False,smilesColumn=1,delimiter=','): count+=1
self.assertEqual(count,10)
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','acd_few.tdt')
count=0
for m in Chem.TDTMolSupplier(fileN): count+=1
self.assertEqual(count,10)
def test49Issue1932365(self):
""" test aromatic Se and Te from smiles/smarts
"""
m = Chem.MolFromSmiles('c1ccc[se]1')
self.assertTrue(m)
self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic())
self.assertTrue(m.GetAtomWithIdx(4).GetIsAromatic())
m = Chem.MolFromSmiles('c1ccc[te]1')
self.assertTrue(m)
self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic())
self.assertTrue(m.GetAtomWithIdx(4).GetIsAromatic())
m = Chem.MolFromSmiles('C1=C[Se]C=C1')
self.assertTrue(m)
self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic())
self.assertTrue(m.GetAtomWithIdx(2).GetIsAromatic())
m = Chem.MolFromSmiles('C1=C[Te]C=C1')
self.assertTrue(m)
self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic())
self.assertTrue(m.GetAtomWithIdx(2).GetIsAromatic())
p = Chem.MolFromSmarts('[se]')
self.assertTrue(Chem.MolFromSmiles('c1ccc[se]1').HasSubstructMatch(p))
self.assertFalse(Chem.MolFromSmiles('C1=CCC[Se]1').HasSubstructMatch(p))
p = Chem.MolFromSmarts('[te]')
self.assertTrue(Chem.MolFromSmiles('c1ccc[te]1').HasSubstructMatch(p))
self.assertFalse(Chem.MolFromSmiles('C1=CCC[Te]1').HasSubstructMatch(p))
def test50Issue1968608(self):
""" test sf.net issue 1968608
"""
smarts = Chem.MolFromSmarts("[r5]")
mol = Chem.MolFromSmiles("N12CCC36C1CC(C(C2)=CCOC4CC5=O)C4C3N5c7ccccc76")
count = len(mol.GetSubstructMatches(smarts, uniquify=0))
self.assertTrue(count==9)
def test51RadicalHandling(self):
""" test handling of atoms with radicals
"""
mol = Chem.MolFromSmiles("[C]C")
self.assertTrue(mol)
atom=mol.GetAtomWithIdx(0)
self.assertTrue(atom.GetNumRadicalElectrons()==3)
self.assertTrue(atom.GetNoImplicit())
atom.SetNoImplicit(False)
atom.SetNumRadicalElectrons(1)
mol.UpdatePropertyCache()
self.assertTrue(atom.GetNumRadicalElectrons()==1)
self.assertTrue(atom.GetNumImplicitHs()==2)
mol = Chem.MolFromSmiles("[c]1ccccc1")
self.assertTrue(mol)
atom=mol.GetAtomWithIdx(0)
self.assertTrue(atom.GetNumRadicalElectrons()==1)
self.assertTrue(atom.GetNoImplicit())
mol = Chem.MolFromSmiles("[n]1ccccc1")
self.assertTrue(mol)
atom=mol.GetAtomWithIdx(0)
self.assertTrue(atom.GetNumRadicalElectrons()==0)
self.assertTrue(atom.GetNoImplicit())
def test52MolFrags(self):
""" test GetMolFrags functionality
"""
mol = Chem.MolFromSmiles("C.CC")
self.assertTrue(mol)
fs = Chem.GetMolFrags(mol)
self.assertTrue(len(fs)==2)
self.assertTrue(len(fs[0])==1)
self.assertTrue(tuple(fs[0])==(0,))
self.assertTrue(len(fs[1])==2)
self.assertTrue(tuple(fs[1])==(1,2))
fs = Chem.GetMolFrags(mol,True)
self.assertTrue(len(fs)==2)
self.assertTrue(fs[0].GetNumAtoms()==1)
self.assertTrue(fs[1].GetNumAtoms()==2)
mol = Chem.MolFromSmiles("CCC")
self.assertTrue(mol)
fs = Chem.GetMolFrags(mol)
self.assertTrue(len(fs)==1)
self.assertTrue(len(fs[0])==3)
self.assertTrue(tuple(fs[0])==(0,1,2))
fs = Chem.GetMolFrags(mol,True)
self.assertTrue(len(fs)==1)
self.assertTrue(fs[0].GetNumAtoms()==3)
mol = Chem.MolFromSmiles("CO")
em = Chem.EditableMol(mol)
em.RemoveBond(0,1)
nm = em.GetMol()
fs = Chem.GetMolFrags(nm,asMols=True)
self.assertEqual([x.GetNumAtoms(onlyExplicit=False) for x in fs],[5,3])
fs = Chem.GetMolFrags(nm,asMols=True,sanitizeFrags=False)
self.assertEqual([x.GetNumAtoms(onlyExplicit=False) for x in fs],[4,2])
def test53Matrices(self) :
""" test adjacency and distance matrices
"""
m = Chem.MolFromSmiles('CC=C')
d = Chem.GetDistanceMatrix(m,0)
self.assertTrue(feq(d[0,1],1.0))
self.assertTrue(feq(d[0,2],2.0))
self.assertTrue(feq(d[1,0],1.0))
self.assertTrue(feq(d[2,0],2.0))
a = Chem.GetAdjacencyMatrix(m,0)
self.assertTrue(a[0,1]==1)
self.assertTrue(a[0,2]==0)
self.assertTrue(a[1,2]==1)
self.assertTrue(a[1,0]==1)
self.assertTrue(a[2,0]==0)
m = Chem.MolFromSmiles('C1CC1')
d = Chem.GetDistanceMatrix(m,0)
self.assertTrue(feq(d[0,1],1.0))
self.assertTrue(feq(d[0,2],1.0))
a = Chem.GetAdjacencyMatrix(m,0)
self.assertTrue(a[0,1]==1)
self.assertTrue(a[0,2]==1)
self.assertTrue(a[1,2]==1)
m = Chem.MolFromSmiles('CC.C')
d = Chem.GetDistanceMatrix(m,0)
self.assertTrue(feq(d[0,1],1.0))
self.assertTrue(d[0,2]>1000)
self.assertTrue(d[1,2]>1000)
a = Chem.GetAdjacencyMatrix(m,0)
self.assertTrue(a[0,1]==1)
self.assertTrue(a[0,2]==0)
self.assertTrue(a[1,2]==0)
def test54Mol2Parser(self):
""" test the mol2 parser
"""
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','pyrazole_pyridine.mol2')
m = Chem.MolFromMol2File(fileN)
self.assertTrue(m.GetNumAtoms()==5)
self.assertTrue(Chem.MolToSmiles(m)=='c1cn[nH]c1',Chem.MolToSmiles(m))
def test55LayeredFingerprint(self):
m1 = Chem.MolFromSmiles('CC(C)C')
fp1 = Chem.LayeredFingerprint(m1)
self.assertEqual(len(fp1),2048)
atomCounts=[0]*m1.GetNumAtoms()
fp2 = Chem.LayeredFingerprint(m1,atomCounts=atomCounts)
self.assertEqual(fp1,fp2)
self.assertEqual(atomCounts,[4,7,4,4])
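    # atomCounts is accumulated in place, so fingerprinting again with the same
    # list doubles the per-atom counts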
fp2 = Chem.LayeredFingerprint(m1,atomCounts=atomCounts)
self.assertEqual(fp1,fp2)
self.assertEqual(atomCounts,[8,14,8,8])
pbv=DataStructs.ExplicitBitVect(2048)
fp3 = Chem.LayeredFingerprint(m1,setOnlyBits=pbv)
self.assertEqual(fp3.GetNumOnBits(),0)
fp3 = Chem.LayeredFingerprint(m1,setOnlyBits=fp2)
self.assertEqual(fp3,fp2)
m2=Chem.MolFromSmiles('CC')
fp4 = Chem.LayeredFingerprint(m2)
atomCounts=[0]*m1.GetNumAtoms()
fp3 = Chem.LayeredFingerprint(m1,setOnlyBits=fp4,atomCounts=atomCounts)
self.assertEqual(atomCounts,[1,3,1,1])
m2=Chem.MolFromSmiles('CCC')
fp4 = Chem.LayeredFingerprint(m2)
atomCounts=[0]*m1.GetNumAtoms()
fp3 = Chem.LayeredFingerprint(m1,setOnlyBits=fp4,atomCounts=atomCounts)
self.assertEqual(atomCounts,[3,6,3,3])
def test56LazySDMolSupplier(self) :
if not hasattr(Chem,'CompressedSDMolSupplier'): return
self.assertRaises(ValueError,lambda : Chem.CompressedSDMolSupplier('nosuchfile.sdf.gz'))
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','NCI_aids_few.sdf.gz')
sdSup = Chem.CompressedSDMolSupplier(fileN)
molNames = ["48", "78", "128", "163", "164", "170", "180", "186",
"192", "203", "210", "211", "213", "220", "229", "256"]
chgs192 = {8:1, 11:1, 15:-1, 18:-1, 20:1, 21:1, 23:-1, 25:-1}
i = 0
for mol in sdSup :
self.assertTrue(mol)
self.assertTrue(mol.GetProp("_Name") == molNames[i])
i += 1
if (mol.GetProp("_Name") == "192") :
# test parsed charges on one of the molecules
for id in chgs192.keys() :
self.assertTrue(mol.GetAtomWithIdx(id).GetFormalCharge() == chgs192[id])
self.assertEqual(i,16)
sdSup = Chem.CompressedSDMolSupplier(fileN)
ns = [mol.GetProp("_Name") for mol in sdSup]
self.assertTrue(ns == molNames)
sdSup = Chem.CompressedSDMolSupplier(fileN, 0)
for mol in sdSup :
self.assertTrue(not mol.HasProp("numArom"))
def test57AddRecursiveQuery(self):
q1 = Chem.MolFromSmiles('CC')
q2 = Chem.MolFromSmiles('CO')
Chem.AddRecursiveQuery(q1,q2,1)
m1 = Chem.MolFromSmiles('OCC')
self.assertTrue(m1.HasSubstructMatch(q2))
self.assertTrue(m1.HasSubstructMatch(q1))
self.assertTrue(m1.HasSubstructMatch(q1))
self.assertTrue(m1.GetSubstructMatch(q1)==(2,1))
q3 = Chem.MolFromSmiles('CS')
Chem.AddRecursiveQuery(q1,q3,1)
self.assertFalse(m1.HasSubstructMatch(q3))
self.assertFalse(m1.HasSubstructMatch(q1))
m2 = Chem.MolFromSmiles('OC(S)C')
self.assertTrue(m2.HasSubstructMatch(q1))
self.assertTrue(m2.GetSubstructMatch(q1)==(3,1))
m3 = Chem.MolFromSmiles('SCC')
self.assertTrue(m3.HasSubstructMatch(q3))
self.assertFalse(m3.HasSubstructMatch(q1))
q1 = Chem.MolFromSmiles('CC')
Chem.AddRecursiveQuery(q1,q2,1)
Chem.AddRecursiveQuery(q1,q3,1,False)
self.assertTrue(m3.HasSubstructMatch(q1))
self.assertTrue(m3.GetSubstructMatch(q1)==(2,1))
def test58Issue2983794(self) :
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','Wrap',
'test_data','issue2983794.sdf')
m1 = Chem.MolFromMolFile(fileN)
self.assertTrue(m1)
em = Chem.EditableMol(m1)
em.RemoveAtom(0)
m2 = em.GetMol()
Chem.Kekulize(m2)
def test59Issue3007178(self) :
m = Chem.MolFromSmiles('CCC')
a = m.GetAtomWithIdx(0)
m=None
self.assertEqual(Chem.MolToSmiles(a.GetOwningMol()),'CCC')
a=None
m = Chem.MolFromSmiles('CCC')
b = m.GetBondWithIdx(0)
m=None
self.assertEqual(Chem.MolToSmiles(b.GetOwningMol()),'CCC')
def test60SmilesWriterClose(self) :
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','fewSmi.csv')
smiSup = Chem.SmilesMolSupplier(fileN, delimiter=",",
smilesColumn=1, nameColumn=0,
titleLine=0)
ms = [x for x in smiSup]
ofile = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','Wrap','test_data','outSmiles.txt')
writer = Chem.SmilesWriter(ofile)
for mol in ms:
writer.write(mol)
writer.close()
newsup=Chem.SmilesMolSupplier(ofile)
newms = [x for x in newsup]
self.assertEqual(len(ms),len(newms))
def test61PathToSubmol(self):
m = Chem.MolFromSmiles('CCCCCC1C(O)CC(O)N1C=CCO')
env = Chem.FindAtomEnvironmentOfRadiusN(m,2,11)
self.assertEqual(len(env),8)
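    # env holds the bond indices within a radius of 2 bonds of atom 11;
    # PathToSubmol extracts those bonds into a new molecule and fills atomMap
    # with the original-atom -> submol-atom index mapping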
amap={}
submol = Chem.PathToSubmol(m,env,atomMap=amap)
self.assertEqual(submol.GetNumAtoms(),len(amap.keys()))
self.assertEqual(submol.GetNumAtoms(),9)
smi=Chem.MolToSmiles(submol,rootedAtAtom=amap[11])
self.assertEqual(smi[0],'N')
refsmi = Chem.MolToSmiles(Chem.MolFromSmiles('N(C=C)(C(C)C)C(O)C'))
csmi = Chem.MolToSmiles(Chem.MolFromSmiles(smi))
self.assertEqual(refsmi,csmi)
def test62SmilesAndSmartsReplacements(self):
mol = Chem.MolFromSmiles('C{branch}C',replacements={'{branch}':'C1(CC1)'})
self.assertEqual(mol.GetNumAtoms(),5)
mol = Chem.MolFromSmarts('C{branch}C',replacements={'{branch}':'C1(CC1)'})
self.assertEqual(mol.GetNumAtoms(),5)
mol = Chem.MolFromSmiles('C{branch}C{acid}',replacements={'{branch}':'C1(CC1)',
'{acid}':"C(=O)O"})
self.assertEqual(mol.GetNumAtoms(),8)
def test63Issue3313539(self):
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','rgroups1.mol')
m = Chem.MolFromMolFile(fileN)
self.assertTrue(m is not None)
at = m.GetAtomWithIdx(3)
self.assertTrue(at is not None)
self.assertTrue(at.HasProp('_MolFileRLabel'))
p = at.GetProp('_MolFileRLabel')
self.assertEqual(p,'2')
self.assertEqual(Chem.GetAtomRLabel(at), 2)
at = m.GetAtomWithIdx(4)
self.assertTrue(at is not None)
self.assertTrue(at.HasProp('_MolFileRLabel'))
p = at.GetProp('_MolFileRLabel')
self.assertEqual(p,'1')
self.assertEqual(Chem.GetAtomRLabel(at), 1)
def test64MoleculeCleanup(self):
m = Chem.MolFromSmiles('CN(=O)=O',False)
self.assertTrue(m)
self.assertTrue(m.GetAtomWithIdx(1).GetFormalCharge()==0 and \
m.GetAtomWithIdx(2).GetFormalCharge()==0 and \
m.GetAtomWithIdx(3).GetFormalCharge()==0)
self.assertTrue(m.GetBondBetweenAtoms(1,3).GetBondType()==Chem.BondType.DOUBLE and \
m.GetBondBetweenAtoms(1,2).GetBondType()==Chem.BondType.DOUBLE )
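    # Cleanup normalizes the hypervalent N(=O)=O nitro representation to the
    # charge-separated [N+](=O)[O-] form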
Chem.Cleanup(m)
m.UpdatePropertyCache()
self.assertTrue(m.GetAtomWithIdx(1).GetFormalCharge()==1 and \
(m.GetAtomWithIdx(2).GetFormalCharge()==-1 or \
m.GetAtomWithIdx(3).GetFormalCharge()==-1))
self.assertTrue(m.GetBondBetweenAtoms(1,3).GetBondType()==Chem.BondType.SINGLE or \
m.GetBondBetweenAtoms(1,2).GetBondType()==Chem.BondType.SINGLE )
def test65StreamSupplier(self):
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','NCI_aids_few.sdf.gz')
molNames = ["48", "78", "128", "163", "164", "170", "180", "186",
"192", "203", "210", "211", "213", "220", "229", "256"]
inf = gzip.open(fileN)
if 0:
sb = Chem.streambuf(inf)
suppl = Chem.ForwardSDMolSupplier(sb)
else:
suppl = Chem.ForwardSDMolSupplier(inf)
i = 0
while not suppl.atEnd():
mol = six.next(suppl)
self.assertTrue(mol)
self.assertTrue(mol.GetProp("_Name") == molNames[i])
i += 1
self.assertEqual(i,16)
# make sure we have object ownership preserved
inf = gzip.open(fileN)
suppl = Chem.ForwardSDMolSupplier(inf)
inf=None
i = 0
while not suppl.atEnd():
mol = six.next(suppl)
self.assertTrue(mol)
self.assertTrue(mol.GetProp("_Name") == molNames[i])
i += 1
self.assertEqual(i,16)
def test66StreamSupplierIter(self):
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','NCI_aids_few.sdf.gz')
inf = gzip.open(fileN)
if 0:
sb = Chem.streambuf(inf)
suppl = Chem.ForwardSDMolSupplier(sb)
else:
suppl = Chem.ForwardSDMolSupplier(inf)
molNames = ["48", "78", "128", "163", "164", "170", "180", "186",
"192", "203", "210", "211", "213", "220", "229", "256"]
i = 0
for mol in suppl :
self.assertTrue(mol)
self.assertTrue(mol.GetProp("_Name") == molNames[i])
i += 1
self.assertEqual(i,16)
def test67StreamSupplierStringIO(self):
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','NCI_aids_few.sdf.gz')
if six.PY3:
from io import BytesIO
sio = BytesIO(gzip.open(fileN).read())
else:
import StringIO
sio = StringIO.StringIO(gzip.open(fileN).read())
suppl = Chem.ForwardSDMolSupplier(sio)
molNames = ["48", "78", "128", "163", "164", "170", "180", "186",
"192", "203", "210", "211", "213", "220", "229", "256"]
i = 0
for mol in suppl:
self.assertTrue(mol)
self.assertTrue(mol.GetProp("_Name") == molNames[i])
i += 1
self.assertEqual(i,16)
def test68ForwardSupplierUsingFilename(self):
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','NCI_aids_few.sdf')
suppl = Chem.ForwardSDMolSupplier(fileN)
molNames = ["48", "78", "128", "163", "164", "170", "180", "186",
"192", "203", "210", "211", "213", "220", "229", "256"]
i = 0
for mol in suppl:
self.assertTrue(mol)
self.assertTrue(mol.GetProp("_Name") == molNames[i])
i += 1
self.assertEqual(i,16)
self.assertRaises(IOError,lambda : Chem.ForwardSDMolSupplier('nosuchfile.sdf'))
def test69StreamSupplierStreambuf(self):
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','NCI_aids_few.sdf.gz')
sb = rdBase.streambuf(gzip.open(fileN))
suppl = Chem.ForwardSDMolSupplier(sb)
molNames = ["48", "78", "128", "163", "164", "170", "180", "186",
"192", "203", "210", "211", "213", "220", "229", "256"]
i = 0
for mol in suppl:
self.assertTrue(mol)
self.assertTrue(mol.GetProp("_Name") == molNames[i])
i += 1
self.assertEqual(i,16)
def test70StreamSDWriter(self):
if six.PY3:
from io import BytesIO,StringIO
else:
from StringIO import StringIO
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','NCI_aids_few.sdf.gz')
inf = gzip.open(fileN)
suppl = Chem.ForwardSDMolSupplier(inf)
osio=StringIO()
w = Chem.SDWriter(osio)
molNames = ["48", "78", "128", "163", "164", "170", "180", "186",
"192", "203", "210", "211", "213", "220", "229", "256"]
i = 0
for mol in suppl :
self.assertTrue(mol)
self.assertTrue(mol.GetProp("_Name") == molNames[i])
w.write(mol)
i += 1
self.assertEqual(i,16)
w.flush()
w=None
if six.PY3:
txt = osio.getvalue().encode()
isio = BytesIO(txt)
else:
isio = StringIO(osio.getvalue())
suppl = Chem.ForwardSDMolSupplier(isio)
i = 0
for mol in suppl :
self.assertTrue(mol)
self.assertTrue(mol.GetProp("_Name") == molNames[i])
i += 1
self.assertEqual(i,16)
def test71StreamSmilesWriter(self):
from rdkit.six.moves import StringIO
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','esters.sdf')
suppl = Chem.ForwardSDMolSupplier(fileN)
osio=StringIO()
w = Chem.SmilesWriter(osio)
ms = [x for x in suppl]
w.SetProps(ms[0].GetPropNames())
i=0
for mol in ms:
self.assertTrue(mol)
w.write(mol)
i+=1
self.assertEqual(i,6)
w.flush()
w=None
txt = osio.getvalue()
self.assertEqual(txt.count('ID'),1)
self.assertEqual(txt.count('\n'),7)
def test72StreamTDTWriter(self):
from rdkit.six.moves import StringIO
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','esters.sdf')
suppl = Chem.ForwardSDMolSupplier(fileN)
osio=StringIO()
w = Chem.TDTWriter(osio)
ms = [x for x in suppl]
w.SetProps(ms[0].GetPropNames())
i=0
for mol in ms:
self.assertTrue(mol)
w.write(mol)
i+=1
self.assertEqual(i,6)
w.flush()
w=None
txt = osio.getvalue()
self.assertEqual(txt.count('ID'),6)
self.assertEqual(txt.count('NAME'),6)
def test73SanitizationOptions(self):
m = Chem.MolFromSmiles('c1ccccc1',sanitize=False)
res = Chem.SanitizeMol(m,catchErrors=True)
self.assertEqual(res,0)
m = Chem.MolFromSmiles('c1cccc1',sanitize=False)
res = Chem.SanitizeMol(m,catchErrors=True)
self.assertEqual(res,Chem.SanitizeFlags.SANITIZE_KEKULIZE)
m = Chem.MolFromSmiles('CC(C)(C)(C)C',sanitize=False)
res = Chem.SanitizeMol(m,catchErrors=True)
self.assertEqual(res,Chem.SanitizeFlags.SANITIZE_PROPERTIES)
m = Chem.MolFromSmiles('c1cccc1',sanitize=False)
res = Chem.SanitizeMol(m,sanitizeOps=Chem.SanitizeFlags.SANITIZE_ALL^Chem.SanitizeFlags.SANITIZE_KEKULIZE,
catchErrors=True)
self.assertEqual(res,Chem.SanitizeFlags.SANITIZE_NONE)
def test74Issue3510149(self):
mol = Chem.MolFromSmiles("CCC1CNCC1CC")
atoms = mol.GetAtoms()
mol=None
for atom in atoms:
idx=atom.GetIdx()
p= atom.GetOwningMol().GetNumAtoms()
mol = Chem.MolFromSmiles("CCC1CNCC1CC")
bonds = mol.GetBonds()
mol=None
for bond in bonds:
idx=bond.GetIdx()
      p = bond.GetOwningMol().GetNumAtoms()
mol = Chem.MolFromSmiles("CCC1CNCC1CC")
bond = mol.GetBondBetweenAtoms(0,1)
mol=None
idx=bond.GetBeginAtomIdx()
p= bond.GetOwningMol().GetNumAtoms()
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','NCI_aids_few.sdf')
sdSup = Chem.SDMolSupplier(fileN)
mol = six.next(sdSup)
nats = mol.GetNumAtoms()
conf = mol.GetConformer()
mol=None
self.assertEqual(nats,conf.GetNumAtoms())
conf.GetOwningMol().GetProp("_Name")
def test75AllBondsExplicit(self):
m = Chem.MolFromSmiles("CCC")
smi = Chem.MolToSmiles(m)
self.assertEqual(smi,"CCC")
smi = Chem.MolToSmiles(m,allBondsExplicit=True)
self.assertEqual(smi,"C-C-C")
m = Chem.MolFromSmiles("c1ccccc1")
smi = Chem.MolToSmiles(m)
self.assertEqual(smi,"c1ccccc1")
smi = Chem.MolToSmiles(m,allBondsExplicit=True)
self.assertEqual(smi,"c1:c:c:c:c:c:1")
def test76VeryLargeMolecule(self):
# this is sf.net issue 3524984
smi = '[C@H](F)(Cl)'+'c1cc[nH]c1'*500+'[C@H](F)(Cl)'
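    # 2 * 3 atoms for the [C@H](F)Cl caps + 500 * 5 atoms per pyrrole unit = 2506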
m = Chem.MolFromSmiles(smi)
self.assertTrue(m)
self.assertEqual(m.GetNumAtoms(),2506)
scs = Chem.FindMolChiralCenters(m)
self.assertEqual(len(scs),2)
def test77MolFragmentToSmiles(self):
smi="OC1CC1CC"
m = Chem.MolFromSmiles(smi)
fsmi = Chem.MolFragmentToSmiles(m,[1,2,3])
self.assertEqual(fsmi,"C1CC1")
fsmi = Chem.MolFragmentToSmiles(m,[1,2,3],bondsToUse=[1,2,5])
self.assertEqual(fsmi,"C1CC1")
fsmi = Chem.MolFragmentToSmiles(m,[1,2,3],bondsToUse=[1,2])
self.assertEqual(fsmi,"CCC")
fsmi = Chem.MolFragmentToSmiles(m,[1,2,3],atomSymbols=["","[A]","[C]","[B]","",""])
self.assertEqual(fsmi,"[A]1[B][C]1")
fsmi = Chem.MolFragmentToSmiles(m,[1,2,3],bondSymbols=["","%","%","","","%"])
self.assertEqual(fsmi,"C1%C%C%1")
smi="c1ccccc1C"
m = Chem.MolFromSmiles(smi)
fsmi = Chem.MolFragmentToSmiles(m,range(6))
self.assertEqual(fsmi,"c1ccccc1")
Chem.Kekulize(m)
fsmi = Chem.MolFragmentToSmiles(m,range(6),kekuleSmiles=True)
self.assertEqual(fsmi,"C1=CC=CC=C1")
fsmi = Chem.MolFragmentToSmiles(m,range(6),atomSymbols=["[C]"]*7,kekuleSmiles=True)
self.assertEqual(fsmi,"[C]1=[C][C]=[C][C]=[C]1")
self.assertRaises(ValueError,lambda : Chem.MolFragmentToSmiles(m,[]))
def test78AtomAndBondProps(self):
m = Chem.MolFromSmiles('c1ccccc1')
at = m.GetAtomWithIdx(0)
self.assertFalse(at.HasProp('foo'))
at.SetProp('foo','bar')
self.assertTrue(at.HasProp('foo'))
self.assertEqual(at.GetProp('foo'),'bar')
bond = m.GetBondWithIdx(0)
self.assertFalse(bond.HasProp('foo'))
bond.SetProp('foo','bar')
self.assertTrue(bond.HasProp('foo'))
self.assertEqual(bond.GetProp('foo'),'bar')
def test79AddRecursiveStructureQueries(self):
qs = {'carbonyl':Chem.MolFromSmiles('CO'),
'amine':Chem.MolFromSmiles('CN')}
q = Chem.MolFromSmiles('CCC')
q.GetAtomWithIdx(0).SetProp('query','carbonyl,amine')
Chem.MolAddRecursiveQueries(q,qs,'query')
m = Chem.MolFromSmiles('CCCO')
self.assertTrue(m.HasSubstructMatch(q))
m = Chem.MolFromSmiles('CCCN')
self.assertTrue(m.HasSubstructMatch(q))
m = Chem.MolFromSmiles('CCCC')
self.assertFalse(m.HasSubstructMatch(q))
def test80ParseMolQueryDefFile(self):
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','ChemTransforms',
'testData','query_file1.txt')
d = Chem.ParseMolQueryDefFile(fileN,standardize=False)
self.assertTrue('CarboxylicAcid' in d)
m = Chem.MolFromSmiles('CC(=O)O')
self.assertTrue(m.HasSubstructMatch(d['CarboxylicAcid']))
self.assertFalse(m.HasSubstructMatch(d['CarboxylicAcid.Aromatic']))
d = Chem.ParseMolQueryDefFile(fileN)
self.assertTrue('carboxylicacid' in d)
self.assertFalse('CarboxylicAcid' in d)
def test81Issue275(self):
smi = Chem.MolToSmiles(Chem.MurckoDecompose(Chem.MolFromSmiles('CCCCC[C@H]1CC[C@H](C(=O)O)CC1')))
self.assertEqual(smi,'C1CCCCC1')
def test82Issue288(self):
m = Chem.MolFromSmiles('CC*')
m.GetAtomWithIdx(2).SetProp('molAtomMapNumber','30')
smi=Chem.MolToSmiles(m)
self.assertEqual(smi,'CC[*:30]')
# try newer api
m = Chem.MolFromSmiles('CC*')
m.GetAtomWithIdx(2).SetAtomMapNum(30)
smi=Chem.MolToSmiles(m)
self.assertEqual(smi,'CC[*:30]')
def test83GitHubIssue19(self):
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','empty2.sdf')
sdSup = Chem.SDMolSupplier(fileN)
self.assertTrue(sdSup.atEnd())
self.assertRaises(IndexError,lambda : sdSup[0])
sdSup.SetData('')
self.assertTrue(sdSup.atEnd())
self.assertRaises(IndexError,lambda : sdSup[0])
sdSup = Chem.SDMolSupplier(fileN)
self.assertRaises(IndexError,lambda : sdSup[0])
sdSup.SetData('')
self.assertRaises(IndexError,lambda : sdSup[0])
sdSup = Chem.SDMolSupplier(fileN)
self.assertEqual(len(sdSup),0)
sdSup.SetData('')
self.assertEqual(len(sdSup),0)
def test84PDBBasics(self):
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','1CRN.pdb')
m = Chem.MolFromPDBFile(fileN)
self.assertTrue(m is not None)
self.assertEqual(m.GetNumAtoms(),327)
self.assertEqual(m.GetNumBonds(),337)
self.assertTrue(m.GetAtomWithIdx(0).GetPDBResidueInfo())
self.assertEqual(m.GetAtomWithIdx(0).GetPDBResidueInfo().GetName()," N ")
self.assertEqual(m.GetAtomWithIdx(0).GetPDBResidueInfo().GetResidueName(),"THR")
self.assertAlmostEqual(m.GetAtomWithIdx(0).GetPDBResidueInfo().GetTempFactor(),13.79,2)
m = Chem.MolFromPDBBlock(Chem.MolToPDBBlock(m))
self.assertEqual(m.GetNumAtoms(),327)
self.assertEqual(m.GetNumBonds(),337)
self.assertTrue(m.GetAtomWithIdx(0).GetPDBResidueInfo())
self.assertEqual(m.GetAtomWithIdx(0).GetPDBResidueInfo().GetName()," N ")
self.assertEqual(m.GetAtomWithIdx(0).GetPDBResidueInfo().GetResidueName(),"THR")
self.assertAlmostEqual(m.GetAtomWithIdx(0).GetPDBResidueInfo().GetTempFactor(),13.79,2)
def test85MolCopying(self):
m = Chem.MolFromSmiles('C1CC1[C@H](F)Cl')
m.SetProp('foo','bar')
m2 = Chem.Mol(m)
self.assertEqual(Chem.MolToSmiles(m,True),Chem.MolToSmiles(m2,True))
self.assertTrue(m2.HasProp('foo'))
self.assertEqual(m2.GetProp('foo'),'bar')
ri = m2.GetRingInfo()
self.assertTrue(ri)
self.assertEqual(ri.NumRings(),1)
def test85MolCopying2(self):
import copy
m1 = Chem.MolFromSmiles('CC')
m1.SetProp('Foo','bar')
m1.foo=[1]
m2 = copy.copy(m1)
m3 = copy.copy(m2)
m4 = copy.deepcopy(m1)
m5 = copy.deepcopy(m2)
m6 = copy.deepcopy(m4)
self.assertEquals(m1.GetProp('Foo'),'bar')
self.assertEquals(m2.GetProp('Foo'),'bar')
self.assertEquals(m3.GetProp('Foo'),'bar')
self.assertEquals(m4.GetProp('Foo'),'bar')
self.assertEquals(m5.GetProp('Foo'),'bar')
self.assertEquals(m6.GetProp('Foo'),'bar')
m2.foo.append(4)
self.assertEquals(m1.foo,[1,4])
self.assertEquals(m2.foo,[1,4])
self.assertEquals(m3.foo,[1,4])
self.assertEquals(m4.foo,[1])
self.assertEquals(m5.foo,[1])
self.assertEquals(m6.foo,[1])
m7 = Chem.RWMol(m1)
self.failIf(hasattr(m7,'foo'))
m7.foo=[1]
m8 = copy.copy(m7)
m9 = copy.deepcopy(m7)
m8.foo.append(4)
self.assertEquals(m7.GetProp('Foo'),'bar')
self.assertEquals(m8.GetProp('Foo'),'bar')
self.assertEquals(m9.GetProp('Foo'),'bar')
self.assertEquals(m8.foo,[1,4])
self.assertEquals(m9.foo,[1])
def test86MolRenumbering(self):
import random
m = Chem.MolFromSmiles('C[C@H]1CC[C@H](C/C=C/[C@H](F)Cl)CC1')
cSmi = Chem.MolToSmiles(m,True)
for i in range(m.GetNumAtoms()):
ans = list(range(m.GetNumAtoms()))
random.shuffle(ans)
m2 = Chem.RenumberAtoms(m,ans)
nSmi = Chem.MolToSmiles(m2,True)
self.assertEqual(cSmi,nSmi)
def test87FragmentOnBonds(self):
m = Chem.MolFromSmiles('CC1CC(O)C1CCC1CC1')
bis = m.GetSubstructMatches(Chem.MolFromSmarts('[!R][R]'))
bs = []
labels=[]
for bi in bis:
b = m.GetBondBetweenAtoms(bi[0],bi[1])
if b.GetBeginAtomIdx()==bi[0]:
labels.append((10,1))
else:
labels.append((1,10))
bs.append(b.GetIdx())
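    # FragmentOnBonds breaks each listed bond and caps both ends with dummy atoms;
    # by default each dummy is labeled with the index of the atom it replaces,
    # while dummyLabels lets us pick the labels per bond (here chosen so that
    # ring fragments end up with [10*] dummies and chain fragments with [1*])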
nm = Chem.FragmentOnBonds(m,bs)
frags = Chem.GetMolFrags(nm)
self.assertEqual(len(frags),5)
self.assertEqual(frags,((0, 12), (1, 2, 3, 5, 11, 14, 16), (4, 13), (6, 7, 15, 18), (8, 9, 10, 17)))
smi = Chem.MolToSmiles(nm,True)
self.assertEqual(smi,'[*]C1CC([4*])C1[6*].[1*]C.[3*]O.[5*]CC[8*].[7*]C1CC1')
nm = Chem.FragmentOnBonds(m,bs,dummyLabels=labels)
frags = Chem.GetMolFrags(nm)
self.assertEqual(len(frags),5)
self.assertEqual(frags,((0, 12), (1, 2, 3, 5, 11, 14, 16), (4, 13), (6, 7, 15, 18), (8, 9, 10, 17)))
smi = Chem.MolToSmiles(nm,True)
self.assertEqual(smi,'[1*]C.[1*]CC[1*].[1*]O.[10*]C1CC([10*])C1[10*].[10*]C1CC1')
m = Chem.MolFromSmiles('CCC(=O)CC(=O)C')
bis = m.GetSubstructMatches(Chem.MolFromSmarts('C=O'))
bs = []
for bi in bis:
b = m.GetBondBetweenAtoms(bi[0],bi[1])
bs.append(b.GetIdx())
bts = [Chem.BondType.DOUBLE]*len(bs)
nm = Chem.FragmentOnBonds(m,bs,bondTypes=bts)
frags = Chem.GetMolFrags(nm)
self.assertEqual(len(frags),3)
smi = Chem.MolToSmiles(nm,True)
self.assertEqual(smi,'[2*]=O.[3*]=C(CC)CC(=[6*])C.[5*]=O')
# github issue 430:
m = Chem.MolFromSmiles('OCCCCN')
self.assertRaises(ValueError,lambda : Chem.FragmentOnBonds(m,()))
def test88QueryAtoms(self):
from rdkit.Chem import rdqueries
m = Chem.MolFromSmiles('c1nc(C)n(CC)c1')
qa = rdqueries.ExplicitDegreeEqualsQueryAtom(3)
l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)])
self.assertEqual(l,(2,4))
qa.ExpandQuery(rdqueries.AtomNumEqualsQueryAtom(6,negate=True))
l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)])
self.assertEqual(l,(4,))
qa = rdqueries.ExplicitDegreeEqualsQueryAtom(3)
qa.ExpandQuery(rdqueries.AtomNumEqualsQueryAtom(6,negate=True),
how=Chem.CompositeQueryType.COMPOSITE_OR)
l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)])
self.assertEqual(l,(1,2,4))
qa = rdqueries.ExplicitDegreeEqualsQueryAtom(3)
qa.ExpandQuery(rdqueries.AtomNumEqualsQueryAtom(6,negate=True),
how=Chem.CompositeQueryType.COMPOSITE_XOR)
l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)])
self.assertEqual(l,(1,2))
qa = rdqueries.ExplicitDegreeGreaterQueryAtom(2)
l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)])
self.assertEqual(l,(2,4))
qa = rdqueries.ExplicitDegreeLessQueryAtom(2)
l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)])
self.assertEqual(l,(3,6))
def test89UnicodeInput(self):
m = Chem.MolFromSmiles(u'c1ccccc1')
self.assertTrue(m is not None)
self.assertEqual(m.GetNumAtoms(),6)
m = Chem.MolFromSmarts(u'c1ccccc1')
self.assertTrue(m is not None)
self.assertEqual(m.GetNumAtoms(),6)
def test90FragmentOnSomeBonds(self):
m = Chem.MolFromSmiles('OCCCCN')
pieces = Chem.FragmentOnSomeBonds(m,(0,2,4),2)
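    # cutting 2 of the 3 listed bonds gives the 3 possible bond-pair combinations;
    # each fragmented copy of the molecule falls apart into 3 pieces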
self.assertEqual(len(pieces),3)
frags = Chem.GetMolFrags(pieces[0])
self.assertEqual(len(frags),3)
self.assertEqual(len(frags[0]),2)
self.assertEqual(len(frags[1]),4)
self.assertEqual(len(frags[2]),4)
frags = Chem.GetMolFrags(pieces[1])
self.assertEqual(len(frags),3)
self.assertEqual(len(frags[0]),2)
self.assertEqual(len(frags[1]),6)
self.assertEqual(len(frags[2]),2)
frags = Chem.GetMolFrags(pieces[2])
self.assertEqual(len(frags),3)
self.assertEqual(len(frags[0]),4)
self.assertEqual(len(frags[1]),4)
self.assertEqual(len(frags[2]),2)
pieces,cpa = Chem.FragmentOnSomeBonds(m,(0,2,4),2,returnCutsPerAtom=True)
self.assertEqual(len(pieces),3)
self.assertEqual(len(cpa),3)
self.assertEqual(len(cpa[0]),m.GetNumAtoms())
# github issue 430:
m = Chem.MolFromSmiles('OCCCCN')
self.assertRaises(ValueError,lambda : Chem.FragmentOnSomeBonds(m,()))
pieces = Chem.FragmentOnSomeBonds(m,(0,2,4),0)
self.assertEqual(len(pieces),0)
def test91RankAtoms(self):
m = Chem.MolFromSmiles('ONCS.ONCS')
ranks = Chem.CanonicalRankAtoms(m,breakTies=False)
self.assertEqual(list(ranks[0:4]), list(ranks[4:]))
m = Chem.MolFromSmiles("c1ccccc1")
ranks = Chem.CanonicalRankAtoms(m,breakTies=False)
for x in ranks:
self.assertEqual(x, 0)
m = Chem.MolFromSmiles("C1NCN1")
ranks = Chem.CanonicalRankAtoms(m,breakTies=False)
self.assertEqual(ranks[0], ranks[2])
self.assertEqual(ranks[1], ranks[3])
def test92RankAtomsInFragment(self):
m = Chem.MolFromSmiles('ONCS.ONCS')
ranks = Chem.CanonicalRankAtomsInFragment(m,
[0,1,2,3],
[0,1,2])
ranks2 = Chem.CanonicalRankAtomsInFragment(m,
[4,5,6,7],
[3,4,5])
self.assertEquals(list(ranks[0:4]),list(ranks2[4:]))
self.assertEquals(list(ranks[4:]), [-1]*4)
self.assertEquals(list(ranks2[0:4]), [-1]*4)
# doc tests
mol = Chem.MolFromSmiles('C1NCN1.C1NCN1')
self.assertEquals(list(Chem.CanonicalRankAtomsInFragment(mol, atomsToUse=range(0,4), breakTies=False)),
[0,6,0,6,-1,-1,-1,-1])
self.assertEquals(list(Chem.CanonicalRankAtomsInFragment(mol, atomsToUse=range(4,8), breakTies=False)),
[-1,-1,-1,-1,0,6,0,6])
def test93RWMolsAsROMol(self):
""" test the RWMol class as a proper ROMol
"""
mol = Chem.MolFromSmiles('C1CCC1')
self.assertTrue(type(mol)==Chem.Mol)
rwmol = Chem.RWMol(mol)
self.assertEqual(Chem.MolToSmiles(rwmol,True),Chem.MolToSmiles(rwmol.GetMol()))
newAt = Chem.Atom(8)
rwmol.ReplaceAtom(0,newAt)
self.assertEqual(Chem.MolToSmiles(rwmol,True),Chem.MolToSmiles(rwmol.GetMol()))
def test94CopyWithConfs(self):
""" test copying Mols with some conformers
"""
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','cmpd2.tpl')
m1 = Chem.MolFromTPLFile(fileN)
self.assertTrue(m1 is not None)
self.assertEquals(m1.GetNumAtoms(),12)
self.assertEquals(m1.GetNumConformers(),2)
self.assertEquals(m1.GetConformer(0).GetNumAtoms(),12)
self.assertEquals(m1.GetConformer(1).GetNumAtoms(),12)
m2 = Chem.Mol(m1)
self.assertEquals(m2.GetNumAtoms(),12)
self.assertEquals(m2.GetNumConformers(),2)
self.assertEquals(m2.GetConformer(0).GetNumAtoms(),12)
self.assertEquals(m2.GetConformer(1).GetNumAtoms(),12)
m2 = Chem.Mol(m1,False,0)
self.assertEquals(m2.GetNumAtoms(),12)
self.assertEquals(m2.GetNumConformers(),1)
self.assertEquals(m2.GetConformer(0).GetNumAtoms(),12)
m2 = Chem.Mol(m1,False,1)
self.assertEquals(m2.GetNumAtoms(),12)
self.assertEquals(m2.GetNumConformers(),1)
self.assertEquals(m2.GetConformer(1).GetNumAtoms(),12)
m2 = Chem.Mol(m1,True)
self.assertTrue(m2.GetNumAtoms()==12)
self.assertTrue(m2.GetNumConformers()==0)
m2 = Chem.RWMol(m1)
self.assertEquals(m2.GetNumAtoms(),12)
self.assertEquals(m2.GetNumConformers(),2)
self.assertEquals(m2.GetConformer(0).GetNumAtoms(),12)
self.assertEquals(m2.GetConformer(1).GetNumAtoms(),12)
m2 = Chem.RWMol(m1,False,0)
self.assertEquals(m2.GetNumAtoms(),12)
self.assertEquals(m2.GetNumConformers(),1)
self.assertEquals(m2.GetConformer(0).GetNumAtoms(),12)
m2 = Chem.RWMol(m1,False,1)
self.assertEquals(m2.GetNumAtoms(),12)
self.assertEquals(m2.GetNumConformers(),1)
self.assertEquals(m2.GetConformer(1).GetNumAtoms(),12)
m2 = Chem.RWMol(m1,True)
self.assertTrue(m2.GetNumAtoms()==12)
self.assertTrue(m2.GetNumConformers()==0)
def testAtomPropQueries(self):
""" test the property queries
"""
from rdkit.Chem import rdqueries
m = Chem.MolFromSmiles("C" * 14)
atoms = m.GetAtoms()
atoms[0].SetProp("hah", "hah")
atoms[1].SetIntProp("bar", 1)
atoms[2].SetIntProp("bar", 2)
atoms[3].SetBoolProp("baz", True)
atoms[4].SetBoolProp("baz", False)
atoms[5].SetProp("boo", "hoo")
atoms[6].SetProp("boo", "-urns")
atoms[7].SetDoubleProp("boot", 1.0)
atoms[8].SetDoubleProp("boot", 4.0)
atoms[9].SetDoubleProp("number", 4.0)
atoms[10].SetIntProp("number", 4)
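    # each entry: (query-atom factory, property name,
    #              {property value: [indices of atoms expected to match]})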
tests = (
(rdqueries.HasIntPropWithValueQueryAtom, "bar", {1:[1], 2:[2]}),
(rdqueries.HasBoolPropWithValueQueryAtom, "baz", {True:[3], False:[4]}),
(rdqueries.HasStringPropWithValueQueryAtom, "boo", {"hoo":[5], "-urns":[6]}),
(rdqueries.HasDoublePropWithValueQueryAtom, "boot", {1.0:[7], 4.0:[8]})
)
for query, name, lookups in tests:
for t,v in lookups.items():
q = query(name, t)
self.assertEqual( v, [x.GetIdx() for x in m.GetAtomsMatchingQuery(q)] )
q = query(name, t, negate=True)
self.assertEqual( sorted(set(range(14)) - set(v)), [x.GetIdx() for x in m.GetAtomsMatchingQuery(q)] )
# check tolerances
self.assertEqual([x.GetIdx() for x in m.GetAtomsMatchingQuery(
rdqueries.HasDoublePropWithValueQueryAtom("boot", 1.0, tolerance=3.))],
[7,8])
# numbers are numbers?, i.e. int!=double
self.assertEqual([x.GetIdx() for x in m.GetAtomsMatchingQuery(
rdqueries.HasIntPropWithValueQueryAtom("number", 4))],
[10])
def testBondPropQueries(self):
""" test the property queries
"""
from rdkit.Chem import rdqueries
m = Chem.MolFromSmiles("C" * 14)
bonds = m.GetBonds()
bonds[0].SetProp("hah", "hah")
bonds[1].SetIntProp("bar", 1)
bonds[2].SetIntProp("bar", 2)
bonds[3].SetBoolProp("baz", True)
bonds[4].SetBoolProp("baz", False)
bonds[5].SetProp("boo", "hoo")
bonds[6].SetProp("boo", "-urns")
bonds[7].SetDoubleProp("boot", 1.0)
bonds[8].SetDoubleProp("boot", 4.0)
bonds[9].SetDoubleProp("number", 4.0)
bonds[10].SetIntProp("number", 4)
tests = (
(rdqueries.HasIntPropWithValueQueryBond, "bar", {1:[1], 2:[2]}),
(rdqueries.HasBoolPropWithValueQueryBond, "baz", {True:[3], False:[4]}),
(rdqueries.HasStringPropWithValueQueryBond, "boo", {"hoo":[5], "-urns":[6]}),
(rdqueries.HasDoublePropWithValueQueryBond, "boot", {1.0:[7], 4.0:[8]})
)
for query, name, lookups in tests:
for t,v in lookups.items():
q = query(name, t)
self.assertEqual( v, [x.GetIdx() for x in m.GetBonds() if q.Match(x)] )
q = query(name, t, negate=True)
self.assertEqual( sorted(set(range(13)) - set(v)),
[x.GetIdx() for x in m.GetBonds() if q.Match(x)])
# check tolerances
q = rdqueries.HasDoublePropWithValueQueryBond("boot", 1.0, tolerance=3.)
self.assertEqual([x.GetIdx() for x in m.GetBonds() if q.Match(x)],
[7,8])
# numbers are numbers?, i.e. int!=double
q = rdqueries.HasIntPropWithValueQueryBond("number", 4)
self.assertEqual([x.GetIdx() for x in m.GetBonds() if q.Match(x)],
[10])
def testGetShortestPath(self):
""" test the GetShortestPath() wrapper
"""
smi = "CC(OC1C(CCCC3)C3C(CCCC2)C2C1OC(C)=O)=O"
m = Chem.MolFromSmiles(smi)
path = Chem.GetShortestPath(m, 1, 20)
self.assertEqual(path, (1, 2, 3, 16, 17, 18, 20))
def testGithub497(self):
outf = gzip.open(tempfile.mktemp(),'wb+')
m = Chem.MolFromSmiles('C')
w = Chem.SDWriter(outf)
e = False
try:
w.write(m)
except Exception:
sys.stderr.write('Opening gzip as binary fails on Python3 ' \
'upon writing to SDWriter without crashing the RDKit\n')
e = True
else:
e = (sys.version_info < (3, 0))
try:
w.close()
except Exception:
sys.stderr.write('Opening gzip as binary fails on Python3 ' \
'upon closing SDWriter without crashing the RDKit\n')
e = True
else:
if (not e):
e = (sys.version_info < (3, 0))
w=None
try:
outf.close()
except Exception:
sys.stderr.write('Opening gzip as binary fails on Python3 ' \
'upon closing the stream without crashing the RDKit\n')
e = True
else:
if (not e):
e = (sys.version_info < (3, 0))
self.assertTrue(e)
def testGithub498(self):
if (sys.version_info < (3, 0)):
mode = 'w+'
else:
mode = 'wt+'
outf = gzip.open(tempfile.mktemp(), mode)
m = Chem.MolFromSmiles('C')
w = Chem.SDWriter(outf)
w.write(m)
w.close()
w=None
outf.close()
def testAdjustQueryProperties(self):
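    # AdjustQueryProperties tunes a molecule for use as a substructure query;
    # roughly, the default parameters add degree constraints to ring atoms, turn
    # dummy atoms into match-anything queries (makeDummiesQueries) and aromatize
    # kekulized input (aromatizeIfPossible); the parameter objects below switch
    # individual adjustments off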
m = Chem.MolFromSmarts('C1CCC1*')
am = Chem.AdjustQueryProperties(m)
self.assertTrue(Chem.MolFromSmiles('C1CCC1C').HasSubstructMatch(m))
self.assertTrue(Chem.MolFromSmiles('C1CCC1C').HasSubstructMatch(am))
self.assertTrue(Chem.MolFromSmiles('C1CC(C)C1C').HasSubstructMatch(m))
self.assertFalse(Chem.MolFromSmiles('C1CC(C)C1C').HasSubstructMatch(am))
m = Chem.MolFromSmiles('C1CCC1*')
am = Chem.AdjustQueryProperties(m)
self.assertFalse(Chem.MolFromSmiles('C1CCC1C').HasSubstructMatch(m))
self.assertTrue(Chem.MolFromSmiles('C1CCC1C').HasSubstructMatch(am))
qps = Chem.AdjustQueryParameters();
qps.makeDummiesQueries=False
am = Chem.AdjustQueryProperties(m,qps)
self.assertFalse(Chem.MolFromSmiles('C1CCC1C').HasSubstructMatch(am))
m = Chem.MolFromSmiles('C1=CC=CC=C1',sanitize=False)
am = Chem.AdjustQueryProperties(m)
self.assertTrue(Chem.MolFromSmiles('c1ccccc1').HasSubstructMatch(am))
qp = Chem.AdjustQueryParameters()
qp.aromatizeIfPossible = False
am = Chem.AdjustQueryProperties(m,qp)
self.assertFalse(Chem.MolFromSmiles('c1ccccc1').HasSubstructMatch(am))
def testGithubIssue579(self):
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','NCI_aids_few.sdf.gz')
inf = gzip.open(fileN)
suppl = Chem.ForwardSDMolSupplier(inf)
m0 = next(suppl)
self.assertIsNot(m0, None)
inf.close()
del suppl
def testSequenceBasics(self):
" very basic round-tripping of the sequence reader/writer support "
helm = 'PEPTIDE1{C.Y.I.Q.N.C.P.L.G}$$$$'
seq = 'CYIQNCPLG'
fasta = '>\nCYIQNCPLG\n'
smi = 'CC[C@H](C)[C@H](NC(=O)[C@H](Cc1ccc(O)cc1)NC(=O)[C@@H](N)CS)C(=O)N[C@@H](CCC(N)=O)C(=O)N[C@@H](CC(N)=O)C(=O)N[C@@H](CS)C(=O)N1CCC[C@H]1C(=O)N[C@@H](CC(C)C)C(=O)NCC(=O)O'
m = Chem.MolFromSequence(seq)
self.assertTrue(m is not None)
self.assertEqual(Chem.MolToSequence(m),seq)
self.assertEqual(Chem.MolToHELM(m),helm)
self.assertEqual(Chem.MolToFASTA(m),fasta)
self.assertEqual(Chem.MolToSmiles(m,isomericSmiles=True),smi)
m = Chem.MolFromHELM(helm)
self.assertTrue(m is not None)
self.assertEqual(Chem.MolToSequence(m),seq)
self.assertEqual(Chem.MolToHELM(m),helm)
self.assertEqual(Chem.MolToFASTA(m),fasta)
self.assertEqual(Chem.MolToSmiles(m,isomericSmiles=True),smi)
m = Chem.MolFromFASTA(fasta)
self.assertTrue(m is not None)
self.assertEqual(Chem.MolToSequence(m),seq)
self.assertEqual(Chem.MolToHELM(m),helm)
self.assertEqual(Chem.MolToFASTA(m),fasta)
self.assertEqual(Chem.MolToSmiles(m,isomericSmiles=True),smi)
def testResMolSupplier(self):
mol = Chem.MolFromSmiles('CC')
resMolSuppl = Chem.ResonanceMolSupplier(mol)
del resMolSuppl
resMolSuppl = Chem.ResonanceMolSupplier(mol)
self.assertEqual(resMolSuppl.GetNumConjGrps(), 0)
self.assertEqual(len(resMolSuppl), 1)
self.assertEqual(resMolSuppl.GetNumConjGrps(), 0)
mol = Chem.MolFromSmiles('NC(=[NH2+])c1ccc(cc1)C(=O)[O-]')
totalFormalCharge = getTotalFormalCharge(mol)
resMolSuppl = Chem.ResonanceMolSupplier(mol)
self.assertFalse(resMolSuppl.GetIsEnumerated())
self.assertEqual(len(resMolSuppl), 4)
self.assertTrue(resMolSuppl.GetIsEnumerated())
resMolSuppl = Chem.ResonanceMolSupplier(mol)
self.assertFalse(resMolSuppl.GetIsEnumerated())
resMolSuppl.Enumerate()
self.assertTrue(resMolSuppl.GetIsEnumerated())
self.assertTrue((resMolSuppl[0].GetBondBetweenAtoms(0, 1).GetBondType() \
!= resMolSuppl[1].GetBondBetweenAtoms(0, 1).GetBondType())
or (resMolSuppl[0].GetBondBetweenAtoms(9, 10).GetBondType() \
!= resMolSuppl[1].GetBondBetweenAtoms(9, 10).GetBondType()))
resMolSuppl = Chem.ResonanceMolSupplier(mol, Chem.KEKULE_ALL)
self.assertEqual(len(resMolSuppl), 8)
bondTypeDict = {}
# check that we actually have two alternate Kekule structures
bondTypeDict[resMolSuppl[0].GetBondBetweenAtoms(3, 4).GetBondType()] = True
bondTypeDict[resMolSuppl[1].GetBondBetweenAtoms(3, 4).GetBondType()] = True
self.assertEqual(len(bondTypeDict), 2)
bondTypeDict = {}
resMolSuppl = Chem.ResonanceMolSupplier(mol,
Chem.ALLOW_INCOMPLETE_OCTETS \
| Chem.UNCONSTRAINED_CATIONS \
| Chem.UNCONSTRAINED_ANIONS)
self.assertEqual(len(resMolSuppl), 32)
for i in range(len(resMolSuppl)):
resMol = resMolSuppl[i]
self.assertEqual(getTotalFormalCharge(resMol), totalFormalCharge)
while (not resMolSuppl.atEnd()):
resMol = six.next(resMolSuppl)
self.assertEqual(getTotalFormalCharge(resMol), totalFormalCharge)
resMolSuppl.reset()
cmpFormalChargeBondOrder(self, resMolSuppl[0], six.next(resMolSuppl))
resMolSuppl = Chem.ResonanceMolSupplier(mol,
Chem.ALLOW_INCOMPLETE_OCTETS \
| Chem.UNCONSTRAINED_CATIONS \
| Chem.UNCONSTRAINED_ANIONS, 10)
self.assertEqual(len(resMolSuppl), 10)
crambinPdb = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','1CRN.pdb')
mol = Chem.MolFromPDBFile(crambinPdb)
resMolSuppl = Chem.ResonanceMolSupplier(mol)
self.assertEqual(len(resMolSuppl), 1)
resMolSuppl = Chem.ResonanceMolSupplier(mol, Chem.KEKULE_ALL)
self.assertEqual(len(resMolSuppl), 8)
def testSubstructMatchAcetate(self):
mol = Chem.MolFromSmiles('CC(=O)[O-]')
query = Chem.MolFromSmarts('C(=O)[O-]')
resMolSuppl = Chem.ResonanceMolSupplier(mol)
matches = mol.GetSubstructMatches(query)
self.assertEqual(len(matches), 1)
self.assertEqual(matches, ((1, 2, 3),))
matches = mol.GetSubstructMatches(query, uniquify = True)
self.assertEqual(len(matches), 1)
self.assertEqual(matches, ((1, 2, 3),))
matches = mol.GetSubstructMatches(query, uniquify = False)
self.assertEqual(len(matches), 1)
self.assertEqual(matches, ((1, 2, 3),))
matches = resMolSuppl.GetSubstructMatches(query)
self.assertEqual(len(matches), 2)
self.assertEqual(matches, ((1, 2, 3), (1, 3, 2)))
matches = resMolSuppl.GetSubstructMatches(query, uniquify = True)
self.assertEqual(len(matches), 1)
self.assertEqual(matches, ((1, 2, 3),))
matches = resMolSuppl.GetSubstructMatches(query, uniquify = False)
self.assertEqual(len(matches), 2)
self.assertEqual(matches, ((1, 2, 3), (1, 3, 2)))
query = Chem.MolFromSmarts('C(~O)~O')
matches = mol.GetSubstructMatches(query, uniquify = False)
self.assertEqual(len(matches), 2)
self.assertEqual(matches, ((1, 2, 3), (1, 3, 2)))
matches = mol.GetSubstructMatches(query, uniquify = True)
self.assertEqual(len(matches), 1)
self.assertEqual(matches, ((1, 2, 3),))
matches = resMolSuppl.GetSubstructMatches(query, uniquify = False)
self.assertEqual(len(matches), 2)
self.assertEqual(matches, ((1, 2, 3), (1, 3, 2)))
matches = resMolSuppl.GetSubstructMatches(query, uniquify = True)
self.assertEqual(len(matches), 1)
self.assertEqual(matches, ((1, 2, 3),))
def testSubstructMatchDMAP(self):
mol = Chem.MolFromSmiles('C(C)Nc1cc[nH+]cc1')
query = Chem.MolFromSmarts('[#7+]')
resMolSuppl = Chem.ResonanceMolSupplier(mol)
matches = mol.GetSubstructMatches(query,
False, False, False)
self.assertEqual(len(matches), 1)
p = matches[0]
self.assertEqual(p[0], 6)
matches = resMolSuppl.GetSubstructMatches(query,
False, False, False)
self.assertEqual(len(matches), 2)
v = []
p = matches[0]
v.append(p[0])
p = matches[1]
v.append(p[0])
v.sort()
self.assertEqual(v[0], 2)
self.assertEqual(v[1], 6)
def testCrambin(self):
crambinPdb = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','1CRN.pdb')
crambin = Chem.MolFromPDBFile(crambinPdb)
res = []
# protonate NH2
res.append(Chem.MolFromSmarts('[Nh2][Ch;Ch2]'))
# protonate Arg
res.append(Chem.MolFromSmarts('[Nh][C]([Nh2])=[Nh]'))
setResidueFormalCharge(crambin, res, 1)
res = []
# deprotonate COOH
res.append(Chem.MolFromSmarts('C(=O)[Oh]'))
setResidueFormalCharge(crambin, res, -1)
res = []
resMolSupplST = Chem.ResonanceMolSupplier(crambin)
# crambin has 2 Arg (3 resonance structures each); 1 Asp, 1 Glu
# and 1 terminal COO- (2 resonance structures each)
# so possible resonance structures are 3^2 * 2^3 = 72
self.assertEqual(len(resMolSupplST), 72)
self.assertEqual(resMolSupplST.GetNumConjGrps(), 56)
carboxylateQuery = Chem.MolFromSmarts('C(=O)[O-]')
guanidiniumQuery = Chem.MolFromSmarts('NC(=[NH2+])N')
matches = crambin.GetSubstructMatches(carboxylateQuery)
self.assertEqual(len(matches), 3)
matches = crambin.GetSubstructMatches(carboxylateQuery,
uniquify = False)
self.assertEqual(len(matches), 3)
matches = crambin.GetSubstructMatches(guanidiniumQuery)
self.assertEqual(len(matches), 0)
matches = crambin.GetSubstructMatches(guanidiniumQuery,
uniquify = False)
self.assertEqual(len(matches), 0)
matches = resMolSupplST.GetSubstructMatches(carboxylateQuery)
self.assertEqual(len(matches), 6)
self.assertEqual(matches, ((166, 167, 168), (166, 168, 167),
(298, 299, 300), (298, 300, 299), (320, 321, 326), (320, 326, 321)))
matches = resMolSupplST.GetSubstructMatches(carboxylateQuery,
uniquify = True)
self.assertEqual(len(matches), 3)
self.assertEqual(matches, ((166, 167, 168),
(298, 299, 300), (320, 321, 326)))
matches = resMolSupplST.GetSubstructMatches(guanidiniumQuery)
self.assertEqual(len(matches), 8)
self.assertEqual(matches, ((66, 67, 68, 69), (66, 67, 69, 68),
(68, 67, 69, 66), (69, 67, 68, 66), (123, 124, 125, 126),
(123, 124, 126, 125), (125, 124, 126, 123), (126, 124, 125, 123)))
matches = resMolSupplST.GetSubstructMatches(guanidiniumQuery,
uniquify = True)
self.assertEqual(len(matches), 2)
self.assertEqual(matches, ((66, 67, 69, 68), (123, 124, 126, 125)))
btList2ST = getBtList2(resMolSupplST)
self.assertTrue(btList2ST)
resMolSupplMT = Chem.ResonanceMolSupplier(crambin)
resMolSupplMT.SetNumThreads(0)
self.assertEqual(len(resMolSupplST), len(resMolSupplMT))
btList2MT = getBtList2(resMolSupplMT)
self.assertTrue(btList2MT)
self.assertEqual(len(btList2ST), len(btList2MT))
for i in range(len(btList2ST)):
for j in range(len(btList2ST)):
self.assertEqual(btList2ST[i][j], btList2MT[i][j])
for suppl in [resMolSupplST, resMolSupplMT]:
matches = suppl.GetSubstructMatches(carboxylateQuery,
numThreads = 0)
self.assertEqual(len(matches), 6)
self.assertEqual(matches, ((166, 167, 168), (166, 168, 167),
(298, 299, 300), (298, 300, 299), (320, 321, 326),
(320, 326, 321)))
matches = suppl.GetSubstructMatches(carboxylateQuery,
uniquify = True, numThreads = 0)
self.assertEqual(len(matches), 3)
self.assertEqual(matches,
((166, 167, 168), (298, 299, 300), (320, 321, 326)))
matches = suppl.GetSubstructMatches(guanidiniumQuery,
numThreads = 0)
self.assertEqual(len(matches), 8)
self.assertEqual(matches, ((66, 67, 68, 69), (66, 67, 69, 68),
(68, 67, 69, 66), (69, 67, 68, 66), (123, 124, 125, 126),
(123, 124, 126, 125), (125, 124, 126, 123), (126, 124, 125, 123)))
matches = suppl.GetSubstructMatches(guanidiniumQuery,
uniquify = True, numThreads = 0)
self.assertEqual(len(matches), 2)
self.assertEqual(matches, ((66, 67, 69, 68), (123, 124, 126, 125)))
def testAtomBondProps(self):
m = Chem.MolFromSmiles('c1ccccc1')
for atom in m.GetAtoms():
d = atom.GetPropsAsDict()
self.assertEquals(set(d.keys()), set(['_CIPRank', '__computedProps']))
self.assertEquals(d['_CIPRank'], 0)
self.assertEquals(list(d['__computedProps']), ['_CIPRank'])
for bond in m.GetBonds():
self.assertEquals(bond.GetPropsAsDict(), {})
def testSDProps(self):
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','NCI_aids_few.sdf')
#fileN = "../FileParsers/test_data/NCI_aids_few.sdf"
sddata = [{'_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 48',
'NSC': 48, 'NCI_AIDS_Antiviral_Screen_IC50':
'2.00E-04\tM\t=\t2.46E-05\t3',
'_Name': 48, 'CAS_RN': '15716-70-8', '_MolFileComments': '15716-70-8',
'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t3',
'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI'},
{'_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 78',
'NSC': 78,
'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t=\t9.80E-05\t3',
'_Name': 78, 'CAS_RN': '6290-84-2', '_MolFileComments': '6290-84-2',
'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t3',
'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI'},
{'_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 128',
'NSC': 128,
'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t=\t4.60E-05\t4',
'_Name': 128, 'CAS_RN': '5395-10-8', '_MolFileComments': '5395-10-8',
'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t4',
'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI'},
{'_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 163',
'NSC': 163,
'NCI_AIDS_Antiviral_Screen_IC50': '6.75E-04\tM\t>\t6.75E-04\t2',
'_Name': 163, 'CAS_RN': '81-11-8', '_MolFileComments': '81-11-8',
'NCI_AIDS_Antiviral_Screen_EC50': '6.75E-04\tM\t>\t6.75E-04\t2',
'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI'},
{'_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 164',
'NSC': 164,
'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t>\t2.00E-04\t2',
'_Name': 164, 'CAS_RN': '5325-43-9', '_MolFileComments': '5325-43-9',
'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t2',
'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI'},
{'_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 170',
'NSC': 170,
'_Name': 170, 'CAS_RN': '999-99-9', '_MolFileComments': '999-99-9',
'NCI_AIDS_Antiviral_Screen_EC50': '9.47E-04\tM\t>\t9.47E-04\t1',
'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI'},
{'_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 180',
'NSC': 180,
'NCI_AIDS_Antiviral_Screen_IC50':
'6.46E-04\tM\t=\t5.80E-04\t2\n1.81E-03\tM\t=\t6.90E-04\t2',
'_Name': 180, 'CAS_RN': '69-72-7', '_MolFileComments': '69-72-7',
'NCI_AIDS_Antiviral_Screen_EC50':
'6.46E-04\tM\t>\t6.46E-04\t2\n1.81E-03\tM\t>\t1.81E-03\t2',
'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI'},
{'_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 186',
'NSC': 186,
'NCI_AIDS_Antiviral_Screen_IC50': '1.44E-04\tM\t=\t2.49E-05\t2',
'_Name': 186, 'CAS_RN': '518-75-2', '_MolFileComments': '518-75-2',
'NCI_AIDS_Antiviral_Screen_EC50': '1.44E-04\tM\t>\t1.44E-04\t2',
'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI'},
{'_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 192',
'NSC': 192,
'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t=\t3.38E-06\t2',
'_Name': 192, 'CAS_RN': '2217-55-2', '_MolFileComments': '2217-55-2',
'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t2',
'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI'},
{'_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 203',
'NSC': 203,
'_Name': 203, 'CAS_RN': '1155-00-6', '_MolFileComments': '1155-00-6',
'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI'},
{'_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 210',
'NSC': 210,
'NCI_AIDS_Antiviral_Screen_IC50': '1.33E-03\tM\t>\t1.33E-03\t2',
'_Name': 210, 'CAS_RN': '5325-75-7', '_MolFileComments': '5325-75-7',
'NCI_AIDS_Antiviral_Screen_EC50': '1.33E-03\tM\t>\t1.33E-03\t2',
'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI'},
{'_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 211',
'NSC': 211,
'NCI_AIDS_Antiviral_Screen_IC50':
'2.00E-04\tM\t>\t2.00E-04\t8\n2.00E-03\tM\t=\t1.12E-03\t2',
'_Name': 211, 'CAS_RN': '5325-76-8', '_MolFileComments': '5325-76-8',
'NCI_AIDS_Antiviral_Screen_EC50':
'2.00E-04\tM\t>\t7.42E-05\t8\n2.00E-03\tM\t=\t6.35E-05\t2',
'NCI_AIDS_Antiviral_Screen_Conclusion': 'CM'},
{'_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 213',
'NSC': 213,
'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t>\t2.00E-04\t4',
'_Name': 213, 'CAS_RN': '119-80-2', '_MolFileComments': '119-80-2',
'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t4',
'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI'},
{'_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 220',
'NSC': 220,
'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t>\t2.00E-04\t4',
'_Name': 220, 'CAS_RN': '5325-83-7', '_MolFileComments': '5325-83-7',
'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t4',
'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI'},
{'_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 229',
'NSC': 229,
'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t>\t2.00E-04\t2',
'_Name': 229, 'CAS_RN': '5325-88-2', '_MolFileComments': '5325-88-2',
'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t2',
'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI'},
{'_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 256',
'NSC': 256,
'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t>\t2.00E-04\t4',
'_Name': 256, 'CAS_RN': '5326-06-7', '_MolFileComments': '5326-06-7',
'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t4',
'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI'},]
sdSup = Chem.SDMolSupplier(fileN)
for i,mol in enumerate(sdSup):
self.assertEquals(mol.GetPropsAsDict(includePrivate=True), sddata[i])
def testGetSetProps(self):
m = Chem.MolFromSmiles("CC")
errors = {"int": "key `foo` exists but does not result in an integer value",
"double": "key `foo` exists but does not result in a double value",
"bool": "key `foo` exists but does not result in a True or False value"}
for ob in [m, list(m.GetAtoms())[0], list(m.GetBonds())[0]]:
ob.SetDoubleProp("foo", 2.0)
with self.assertRaises(ValueError) as e:
ob.GetBoolProp("foo")
self.assertEquals(str(e.exception), errors["bool"])
with self.assertRaises(ValueError) as e:
ob.GetIntProp("foo")
self.assertEquals(str(e.exception), errors["int"])
ob.SetBoolProp("foo", True)
with self.assertRaises(ValueError) as e:
ob.GetDoubleProp("foo")
self.assertEquals(str(e.exception), errors["double"])
with self.assertRaises(ValueError) as e:
ob.GetIntProp("foo")
self.assertEquals(str(e.exception), errors["int"])
def testInvariantException(self):
m = Chem.MolFromSmiles("C")
try:
m.GetAtomWithIdx(3)
except RuntimeError as e:
import platform
details = str(e)
if platform.system()=='Windows':
details = details.replace('\\','/')
self.assertTrue("Code/GraphMol/ROMol.cpp" in details)
self.assertTrue("Failed Expression: 3 <= 0" in details)
self.assertTrue("RDKIT:" in details)
self.assertTrue(__version__ in details)
# this test should probably always be last since it wraps
# the logging stream
def testLogging(self):
err = sys.stderr
try:
loggers = [("RDKit ERROR", "1", Chem.LogErrorMsg),
("RDKit WARNING", "2", Chem.LogWarningMsg)]
for msg, v, log in loggers:
sys.stderr = six.StringIO()
log(v)
self.assertEquals(sys.stderr.getvalue(), "")
Chem.WrapLogs()
for msg, v, log in loggers:
sys.stderr = six.StringIO()
log(v)
s = sys.stderr.getvalue()
self.assertTrue(msg in s)
finally:
sys.stderr = err
def testGetSDText(self) :
fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','FileParsers',
'test_data','NCI_aids_few.sdf')
#fileN = "../FileParsers/test_data/NCI_aids_few.sdf"
sdSup = Chem.SDMolSupplier(fileN)
for m in sdSup:
sdt = Chem.SDWriter.GetText(m)
ts = Chem.SDMolSupplier()
ts.SetData(sdt)
nm = next(ts)
self.assertEqual(Chem.MolToSmiles(m,True),Chem.MolToSmiles(nm,True))
for pn in m.GetPropNames():
self.assertTrue(nm.HasProp(pn))
self.assertEqual(m.GetProp(pn),nm.GetProp(pn))
def testUnfoldedRDKFingerprint(self):
from rdkit.Chem import AllChem
m = Chem.MolFromSmiles('c1ccccc1N')
fp = AllChem.UnfoldedRDKFingerprintCountBased(m)
fpDict = fp.GetNonzeroElements()
self.assertEquals(len(fpDict.items()),19)
self.assertTrue(374073638 in fpDict)
self.assertEquals(fpDict[374073638],6)
self.assertTrue(464351883 in fpDict)
self.assertEquals(fpDict[464351883],2)
self.assertTrue(1949583554 in fpDict)
self.assertEquals(fpDict[1949583554],6)
self.assertTrue(4105342207 in fpDict)
self.assertEquals(fpDict[4105342207],1)
self.assertTrue(794080973 in fpDict)
self.assertEquals(fpDict[794080973],1)
self.assertTrue(3826517238 in fpDict)
self.assertEquals(fpDict[3826517238],2)
m = Chem.MolFromSmiles('Cl')
fp = AllChem.UnfoldedRDKFingerprintCountBased(m)
fpDict = fp.GetNonzeroElements()
self.assertEquals(len(fpDict.items()),0)
m = Chem.MolFromSmiles('CCCO')
aBits = {}
fp = AllChem.UnfoldedRDKFingerprintCountBased(m, bitInfo=aBits)
fpDict = fp.GetNonzeroElements()
self.assertEquals(len(fpDict.items()),5)
self.assertTrue(1524090560 in fpDict)
self.assertEquals(fpDict[1524090560],1)
self.assertTrue(1940446997 in fpDict)
self.assertEquals(fpDict[1940446997],1)
self.assertTrue(3977409745 in fpDict)
self.assertEquals(fpDict[3977409745],1)
self.assertTrue(4274652475 in fpDict)
self.assertEquals(fpDict[4274652475],1)
self.assertTrue(4275705116 in fpDict)
self.assertEquals(fpDict[4275705116],2)
self.assertTrue(1524090560 in aBits)
self.assertEquals(aBits[1524090560],[[1,2]])
self.assertTrue(1940446997 in aBits)
self.assertEquals(aBits[1940446997],[[0,1]])
self.assertTrue(3977409745 in aBits)
self.assertEquals(aBits[3977409745],[[0,1,2]])
self.assertTrue(4274652475 in aBits)
self.assertEquals(aBits[4274652475],[[2]])
self.assertTrue(4275705116 in aBits)
self.assertEquals(aBits[4275705116],[[0],[1]])
def testRDKFingerprintBitInfo(self):
m = Chem.MolFromSmiles('CCCO')
aBits = {}
fp1 = Chem.RDKFingerprint(m, bitInfo=aBits)
self.assertTrue(1183 in aBits)
self.assertEquals(aBits[1183],[[1,2]])
self.assertTrue(709 in aBits)
self.assertEquals(aBits[709],[[0,1]])
self.assertTrue(1118 in aBits)
self.assertEquals(aBits[1118],[[0,1,2]])
self.assertTrue(562 in aBits)
self.assertEquals(aBits[562],[[2]])
self.assertTrue(1772 in aBits)
self.assertEquals(aBits[1772],[[0],[1]])
def testSimpleAromaticity(self):
m=Chem.MolFromSmiles('c1ccccc1')
self.assertTrue(m.GetBondWithIdx(0).GetIsAromatic())
self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic())
Chem.Kekulize(m,True)
self.assertFalse(m.GetBondWithIdx(0).GetIsAromatic())
self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic())
Chem.SetAromaticity(m,Chem.AROMATICITY_SIMPLE)
self.assertTrue(m.GetBondWithIdx(0).GetIsAromatic())
self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic())
m=Chem.MolFromSmiles('c1c[nH]cc1')
self.assertTrue(m.GetBondWithIdx(0).GetIsAromatic())
self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic())
Chem.Kekulize(m,True)
self.assertFalse(m.GetBondWithIdx(0).GetIsAromatic())
self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic())
Chem.SetAromaticity(m,Chem.AROMATICITY_SIMPLE)
self.assertTrue(m.GetBondWithIdx(0).GetIsAromatic())
self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic())
m=Chem.MolFromSmiles('c1cccoocc1')
self.assertTrue(m.GetBondWithIdx(0).GetIsAromatic())
self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic())
Chem.Kekulize(m,True)
self.assertFalse(m.GetBondWithIdx(0).GetIsAromatic())
self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic())
Chem.SetAromaticity(m,Chem.AROMATICITY_SIMPLE)
self.assertFalse(m.GetBondWithIdx(0).GetIsAromatic())
self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic())
m=Chem.MolFromSmiles('c1ooc1')
self.assertTrue(m.GetBondWithIdx(0).GetIsAromatic())
self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic())
Chem.Kekulize(m,True)
self.assertFalse(m.GetBondWithIdx(0).GetIsAromatic())
self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic())
Chem.SetAromaticity(m,Chem.AROMATICITY_SIMPLE)
self.assertFalse(m.GetBondWithIdx(0).GetIsAromatic())
self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic())
m=Chem.MolFromSmiles('C1=CC2=CC=CC=CC2=C1')
self.assertTrue(m.GetBondWithIdx(0).GetIsAromatic())
self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic())
Chem.Kekulize(m,True)
self.assertFalse(m.GetBondWithIdx(0).GetIsAromatic())
self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic())
Chem.SetAromaticity(m,Chem.AROMATICITY_SIMPLE)
self.assertFalse(m.GetBondWithIdx(0).GetIsAromatic())
self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic())
def testGithub955(self):
m = Chem.MolFromSmiles("CCC")
m.GetAtomWithIdx(0).SetProp("foo","1")
self.assertEqual(list(m.GetAtomWithIdx(0).GetPropNames()),["foo"])
m.GetBondWithIdx(0).SetProp("foo","1")
self.assertEqual(list(m.GetBondWithIdx(0).GetPropNames()),["foo"])
def testMDLProps(self):
m = Chem.MolFromSmiles("CCC")
m.GetAtomWithIdx(0).SetAtomMapNum(1)
Chem.SetAtomAlias(m.GetAtomWithIdx(1), "foo")
Chem.SetAtomValue(m.GetAtomWithIdx(1), "bar")
m = Chem.MolFromMolBlock(Chem.MolToMolBlock(m))
self.assertEquals(m.GetAtomWithIdx(0).GetAtomMapNum(), 1)
self.assertEquals(Chem.GetAtomAlias(m.GetAtomWithIdx(1)), "foo")
self.assertEquals(Chem.GetAtomValue(m.GetAtomWithIdx(1)), "bar")
def testSmilesProps(self):
m = Chem.MolFromSmiles("C")
Chem.SetSupplementalSmilesLabel(m.GetAtomWithIdx(0), 'xxx')
self.assertEquals(Chem.MolToSmiles(m), "Cxxx")
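# A minimal sketch, not exercised by the suite above, restating the pattern from
# testSubstructMatchAcetate: over a ResonanceMolSupplier, GetSubstructMatches with
# uniquify=False keeps the symmetry-equivalent matches contributed by the individual
# resonance structures (e.g. both oxygens of a carboxylate), while uniquify=True
# collapses them to a single match. The helper name and defaults are illustrative only.
def _resonanceMatchesDemo(smiles='CC(=O)[O-]', smarts='C(=O)[O-]', uniquify=False):
  mol = Chem.MolFromSmiles(smiles)
  query = Chem.MolFromSmarts(smarts)
  suppl = Chem.ResonanceMolSupplier(mol)
  # e.g. ((1, 2, 3), (1, 3, 2)) with uniquify=False and ((1, 2, 3),) with uniquify=True
  return suppl.GetSubstructMatches(query, uniquify=uniquify)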
if __name__ == '__main__':
unittest.main()
|
adalke/rdkit
|
Code/GraphMol/Wrap/rough_test.py
|
Python
|
bsd-3-clause
| 138,790
|
[
"RDKit"
] |
8a2f4186f38c6d5fba0138e5acbd276fe41cda867a1e420709377b6549d93491
|
#!/usr/bin/env python
"""
Test YANK using simple models.
DESCRIPTION
This test suite generates a number of simple models to test the 'Yank' facility.
COPYRIGHT
Written by John D. Chodera <jchodera@gmail.com> while at the University of California Berkeley.
LICENSE
This code is licensed under the latest available version of the GNU General Public License.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
from openmmtools import testsystems
import mdtraj
from mdtraj.utils import enter_temp_directory
from nose import tools
import netCDF4 as netcdf
from yank.repex import ThermodynamicState
from yank.pipeline import find_components
from yank.yank import *
#=============================================================================================
# MODULE CONSTANTS
#=============================================================================================
from simtk import unit
kB = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA # Boltzmann constant
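# A minimal sketch of the kT quantity used by the tests below: since kB above already
# includes Avogadro's number, kB * T is a molar energy (about 2.49 kJ/mol at 300 K).
# The helper is only an illustration of the unit handling and is not called by the suite.
def _thermal_energy(temperature=300.0 * unit.kelvin):
    """Return kB*T expressed in kJ/mol."""
    return (kB * temperature).in_units_of(unit.kilojoule_per_mole)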
#=============================================================================================
# MAIN AND TESTS
#=============================================================================================
def test_parameters():
"""Test Yank parameters initialization."""
# Check that both Yank and Repex parameters are accepted
Yank(store_directory='test', randomize_ligand=True, nsteps_per_iteration=1)
@tools.raises(TypeError)
def test_unknown_parameters():
"""Test whether Yank raises exception on wrong initialization."""
Yank(store_directory='test', wrong_parameter=False)
@tools.raises(ValueError)
def test_no_alchemical_atoms():
"""Test whether Yank raises exception when no alchemical atoms are specified."""
toluene = testsystems.TolueneImplicit()
# Create parameters. With the exception of atom_indices, all other
# parameters must be legal; we don't want to catch an exception
# different from the one we are testing.
phase = AlchemicalPhase(name='solvent-implicit', reference_system=toluene.system,
reference_topology=toluene.topology,
positions=toluene.positions, atom_indices={'ligand': []},
protocol=AbsoluteAlchemicalFactory.defaultSolventProtocolImplicit())
thermodynamic_state = ThermodynamicState(temperature=300.0*unit.kelvin)
# Create new simulation.
with enter_temp_directory():
yank = Yank(store_directory='output')
yank.create(thermodynamic_state, phase)
def test_phase_creation():
"""Phases are initialized correctly by Yank.create()."""
phase_name = 'my-solvent-phase'
toluene = testsystems.TolueneImplicit()
protocol = AbsoluteAlchemicalFactory.defaultSolventProtocolImplicit()
atom_indices = find_components(toluene.system, toluene.topology, 'resname TOL')
phase = AlchemicalPhase(phase_name, toluene.system, toluene.topology,
toluene.positions, atom_indices, protocol)
thermodynamic_state = ThermodynamicState(temperature=300.0*unit.kelvin)
# Create new simulation.
with enter_temp_directory():
output_dir = 'output'
utils.config_root_logger(verbose=False)
yank = Yank(store_directory=output_dir)
yank.create(thermodynamic_state, phase)
# Netcdf dataset has been created
nc_path = os.path.join(output_dir, phase_name + '.nc')
assert os.path.isfile(nc_path)
# Read data
try:
nc_file = netcdf.Dataset(nc_path, mode='r')
metadata_group = nc_file.groups['metadata']
serialized_system = metadata_group.variables['reference_system'][0]
serialized_topology = metadata_group.variables['topology'][0]
finally:
nc_file.close()
# Yank doesn't add a barostat to implicit systems
serialized_system = str(serialized_system) # convert unicode
deserialized_system = openmm.XmlSerializer.deserialize(serialized_system)
for force in deserialized_system.getForces():
assert 'Barostat' not in force.__class__.__name__
# Topology has been stored correctly
deserialized_topology = utils.deserialize_topology(serialized_topology)
assert deserialized_topology == mdtraj.Topology.from_openmm(toluene.topology)
def notest_LennardJonesPair(box_width_nsigma=6.0):
"""
Compute binding free energy of two Lennard-Jones particles and compare to numerical result.
Parameters
----------
box_width_nsigma : float, optional, default=6.0
Box width is set to this multiple of Lennard-Jones sigma.
"""
NSIGMA_MAX = 6.0 # number of standard errors tolerated for success
# Create Lennard-Jones pair.
thermodynamic_state = ThermodynamicState(temperature=300.0*unit.kelvin)
kT = kB * thermodynamic_state.temperature
sigma = 3.5 * unit.angstroms
epsilon = 6.0 * kT
test = testsystems.LennardJonesPair(sigma=sigma, epsilon=epsilon)
system, positions = test.system, test.positions
binding_free_energy = test.get_binding_free_energy(thermodynamic_state)
# Create temporary directory for testing.
import tempfile
store_dir = tempfile.mkdtemp()
# Initialize YANK object.
options = dict()
options['number_of_iterations'] = 10
options['platform'] = openmm.Platform.getPlatformByName("Reference") # use Reference platform for speed
options['mc_rotation'] = False
options['mc_displacement'] = True
options['mc_displacement_sigma'] = 1.0 * unit.nanometer
options['timestep'] = 2 * unit.femtoseconds
options['nsteps_per_iteration'] = 50
# Override receptor mass to keep it stationary.
#system.setParticleMass(0, 0)
# Override box vectors.
box_edge = box_width_nsigma * sigma  # box width in multiples of sigma, per the docstring
a = unit.Quantity((box_edge, 0 * unit.angstrom, 0 * unit.angstrom))
b = unit.Quantity((0 * unit.angstrom, box_edge, 0 * unit.angstrom))
c = unit.Quantity((0 * unit.angstrom, 0 * unit.angstrom, box_edge))
system.setDefaultPeriodicBoxVectors(a, b, c)
# Override positions
positions[0,:] = box_edge/2
positions[1,:] = box_edge/4
phase = 'complex-explicit'
# Alchemical protocol.
from yank.alchemy import AlchemicalState
alchemical_states = list()
lambda_values = [0.0, 0.25, 0.50, 0.75, 1.0]
for lambda_value in lambda_values:
alchemical_state = AlchemicalState()
alchemical_state['lambda_electrostatics'] = lambda_value
alchemical_state['lambda_sterics'] = lambda_value
alchemical_states.append(alchemical_state)
protocols = dict()
protocols[phase] = alchemical_states
# Create phases.
alchemical_phase = AlchemicalPhase(phase, system, test.topology, positions,
{'complex-explicit': {'ligand': [1]}},
alchemical_states)
# Create new simulation.
yank = Yank(store_dir, **options)
yank.create(thermodynamic_state, alchemical_phase)
# Run the simulation.
yank.run()
# Analyze the data.
results = yank.analyze()
standard_state_correction = results[phase]['standard_state_correction']
Delta_f = results[phase]['Delta_f_ij'][0,1] - standard_state_correction
dDelta_f = results[phase]['dDelta_f_ij'][0,1]
nsigma = abs(binding_free_energy/kT - Delta_f) / dDelta_f
# Check results against analytical results.
# TODO: Incorporate standard state correction
output = "\n"
output += "Analytical binding free energy : %10.5f +- %10.5f kT\n" % (binding_free_energy / kT, 0)
output += "Computed binding free energy (with standard state correction) : %10.5f +- %10.5f kT (nsigma = %3.1f)\n" % (Delta_f, dDelta_f, nsigma)
output += "Computed binding free energy (without standard state correction): %10.5f +- %10.5f kT (nsigma = %3.1f)\n" % (Delta_f + standard_state_correction, dDelta_f, nsigma)
output += "Standard state correction alone : %10.5f kT\n" % (standard_state_correction)
print(output)
#if (nsigma > NSIGMA_MAX):
# output += "\n"
# output += "Computed binding free energy differs from true binding free energy.\n"
# raise Exception(output)
return [Delta_f, dDelta_f]
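# A minimal sketch of the acceptance criterion applied in notest_LennardJonesPair:
# the deviation of the computed free energy from the analytical reference, measured
# in units of its statistical uncertainty, would be compared against a tolerance
# such as NSIGMA_MAX. Name and signature are illustrative only.
def _nsigma_from_reference(delta_f, ddelta_f, delta_f_reference):
    """Return |reference - computed| / uncertainty, all in the same units (e.g. kT)."""
    return abs(delta_f_reference - delta_f) / ddelta_f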
if __name__ == '__main__':
from yank import utils
utils.config_root_logger(True, log_file_path='test_LennardJones_pair.log')
box_width_nsigma_values = np.array([3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0])
Delta_f_n = list()
dDelta_f_n = list()
for (n, box_width_nsigma) in enumerate(box_width_nsigma_values):
[Delta_f, dDelta_f] = notest_LennardJonesPair(box_width_nsigma=box_width_nsigma)
Delta_f_n.append(Delta_f)
dDelta_f_n.append(dDelta_f)
Delta_f_n = np.array(Delta_f_n)
dDelta_f_n = np.array(dDelta_f_n)
for (box_width_nsigma, Delta_f, dDelta_f) in zip(box_width_nsigma_values, Delta_f_n, dDelta_f_n):
print("%8.3f %12.6f %12.6f" % (box_width_nsigma, Delta_f, dDelta_f))
|
andrrizzi/yank
|
Yank/tests/test_yank.py
|
Python
|
mit
| 9,253
|
[
"MDTraj",
"NetCDF",
"OpenMM"
] |
c109d213b1c018d10668fd7ab2f12b8541f935692fc9d7ba615b9f6cd2c98cf5
|
# -*- coding: utf-8 -*-
"""
Unit tests for the statistics module.
:copyright: Copyright 2014-2016 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division
import itertools
import math
import sys
import unittest
import neo
import numpy as np
import quantities as pq
import scipy.integrate as spint
from numpy.testing import assert_array_almost_equal, assert_array_equal, \
assert_array_less
import elephant.kernels as kernels
from elephant import statistics
from elephant.spike_train_generation import homogeneous_poisson_process
if sys.version_info.major == 2:
import unittest2 as unittest
class isi_TestCase(unittest.TestCase):
def setUp(self):
self.test_array_2d = np.array([[0.3, 0.56, 0.87, 1.23],
[0.02, 0.71, 1.82, 8.46],
[0.03, 0.14, 0.15, 0.92]])
self.targ_array_2d_0 = np.array([[-0.28, 0.15, 0.95, 7.23],
[0.01, -0.57, -1.67, -7.54]])
self.targ_array_2d_1 = np.array([[0.26, 0.31, 0.36],
[0.69, 1.11, 6.64],
[0.11, 0.01, 0.77]])
self.targ_array_2d_default = self.targ_array_2d_1
self.test_array_1d = self.test_array_2d[0, :]
self.targ_array_1d = self.targ_array_2d_1[0, :]
def test_isi_with_spiketrain(self):
st = neo.SpikeTrain(
self.test_array_1d, units='ms', t_stop=10.0, t_start=0.29)
target = pq.Quantity(self.targ_array_1d, 'ms')
res = statistics.isi(st)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_quantities_1d(self):
st = pq.Quantity(self.test_array_1d, units='ms')
target = pq.Quantity(self.targ_array_1d, 'ms')
res = statistics.isi(st)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_plain_array_1d(self):
st = self.test_array_1d
target = self.targ_array_1d
res = statistics.isi(st)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_plain_array_2d_default(self):
st = self.test_array_2d
target = self.targ_array_2d_default
res = statistics.isi(st)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_plain_array_2d_0(self):
st = self.test_array_2d
target = self.targ_array_2d_0
res = statistics.isi(st, axis=0)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_plain_array_2d_1(self):
st = self.test_array_2d
target = self.targ_array_2d_1
res = statistics.isi(st, axis=1)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_unsorted_array(self):
np.random.seed(0)
array = np.random.rand(100)
with self.assertWarns(UserWarning):
isi = statistics.isi(array)
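# A minimal reference sketch for the plain-array cases above: the target arrays in
# isi_TestCase are simply successive differences, so for unit-less input
# statistics.isi() is expected to behave like np.diff along the chosen axis.
# Sketch only; the tests call statistics.isi() directly.
def _isi_reference(spike_times, axis=-1):
    return np.diff(np.asarray(spike_times), axis=axis)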
class isi_cv_TestCase(unittest.TestCase):
def setUp(self):
self.test_array_regular = np.arange(1, 6)
def test_cv_isi_regular_spiketrain_is_zero(self):
st = neo.SpikeTrain(self.test_array_regular, units='ms', t_stop=10.0)
targ = 0.0
res = statistics.cv(statistics.isi(st))
self.assertEqual(res, targ)
def test_cv_isi_regular_array_is_zero(self):
st = self.test_array_regular
targ = 0.0
res = statistics.cv(statistics.isi(st))
self.assertEqual(res, targ)
class mean_firing_rate_TestCase(unittest.TestCase):
def setUp(self):
self.test_array_3d = np.ones([5, 7, 13])
self.test_array_2d = np.array([[0.3, 0.56, 0.87, 1.23],
[0.02, 0.71, 1.82, 8.46],
[0.03, 0.14, 0.15, 0.92]])
self.targ_array_2d_0 = np.array([3, 3, 3, 3])
self.targ_array_2d_1 = np.array([4, 4, 4])
self.targ_array_2d_None = 12
self.targ_array_2d_default = self.targ_array_2d_None
self.max_array_2d_0 = np.array([0.3, 0.71, 1.82, 8.46])
self.max_array_2d_1 = np.array([1.23, 8.46, 0.92])
self.max_array_2d_None = 8.46
self.max_array_2d_default = self.max_array_2d_None
self.test_array_1d = self.test_array_2d[0, :]
self.targ_array_1d = self.targ_array_2d_1[0]
self.max_array_1d = self.max_array_2d_1[0]
def test_invalid_input_spiketrain(self):
# empty spiketrain
self.assertRaises(ValueError, statistics.mean_firing_rate, [])
for st_invalid in (None, 0.1):
self.assertRaises(TypeError, statistics.mean_firing_rate,
st_invalid)
def test_mean_firing_rate_with_spiketrain(self):
st = neo.SpikeTrain(self.test_array_1d, units='ms', t_stop=10.0)
target = pq.Quantity(self.targ_array_1d / 10., '1/ms')
res = statistics.mean_firing_rate(st)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_typical_use_case(self):
np.random.seed(92)
st = homogeneous_poisson_process(rate=100 * pq.Hz, t_stop=100 * pq.s)
rate1 = statistics.mean_firing_rate(st)
rate2 = statistics.mean_firing_rate(st, t_start=st.t_start,
t_stop=st.t_stop)
self.assertEqual(rate1.units, rate2.units)
self.assertAlmostEqual(rate1.item(), rate2.item())
def test_mean_firing_rate_with_spiketrain_set_ends(self):
st = neo.SpikeTrain(self.test_array_1d, units='ms', t_stop=10.0)
target = pq.Quantity(2 / 0.5, '1/ms')
res = statistics.mean_firing_rate(st, t_start=0.4 * pq.ms,
t_stop=0.9 * pq.ms)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_quantities_1d(self):
st = pq.Quantity(self.test_array_1d, units='ms')
target = pq.Quantity(self.targ_array_1d / self.max_array_1d, '1/ms')
res = statistics.mean_firing_rate(st)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_quantities_1d_set_ends(self):
st = pq.Quantity(self.test_array_1d, units='ms')
# t_stop is not a Quantity
self.assertRaises(TypeError, statistics.mean_firing_rate, st,
t_start=400 * pq.us, t_stop=1.)
# t_start is not a Quantity
self.assertRaises(TypeError, statistics.mean_firing_rate, st,
t_start=0.4, t_stop=1. * pq.ms)
def test_mean_firing_rate_with_plain_array_1d(self):
st = self.test_array_1d
target = self.targ_array_1d / self.max_array_1d
res = statistics.mean_firing_rate(st)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_1d_set_ends(self):
st = self.test_array_1d
target = self.targ_array_1d / (1.23 - 0.3)
res = statistics.mean_firing_rate(st, t_start=0.3, t_stop=1.23)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_2d_default(self):
st = self.test_array_2d
target = self.targ_array_2d_default / self.max_array_2d_default
res = statistics.mean_firing_rate(st)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_2d_0(self):
st = self.test_array_2d
target = self.targ_array_2d_0 / self.max_array_2d_0
res = statistics.mean_firing_rate(st, axis=0)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_2d_1(self):
st = self.test_array_2d
target = self.targ_array_2d_1 / self.max_array_2d_1
res = statistics.mean_firing_rate(st, axis=1)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_3d_None(self):
st = self.test_array_3d
target = np.sum(self.test_array_3d, None) / 5.
res = statistics.mean_firing_rate(st, axis=None, t_stop=5.)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_3d_0(self):
st = self.test_array_3d
target = np.sum(self.test_array_3d, 0) / 5.
res = statistics.mean_firing_rate(st, axis=0, t_stop=5.)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_3d_1(self):
st = self.test_array_3d
target = np.sum(self.test_array_3d, 1) / 5.
res = statistics.mean_firing_rate(st, axis=1, t_stop=5.)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_3d_2(self):
st = self.test_array_3d
target = np.sum(self.test_array_3d, 2) / 5.
res = statistics.mean_firing_rate(st, axis=2, t_stop=5.)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_2d_1_set_ends(self):
st = self.test_array_2d
target = np.array([4, 1, 3]) / (1.23 - 0.14)
res = statistics.mean_firing_rate(st, axis=1, t_start=0.14,
t_stop=1.23)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_2d_None(self):
st = self.test_array_2d
target = self.targ_array_2d_None / self.max_array_2d_None
res = statistics.mean_firing_rate(st, axis=None)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_and_units_start_stop_typeerror(
self):
st = self.test_array_2d
self.assertRaises(TypeError, statistics.mean_firing_rate, st,
t_start=pq.Quantity(0, 'ms'))
self.assertRaises(TypeError, statistics.mean_firing_rate, st,
t_stop=pq.Quantity(10, 'ms'))
self.assertRaises(TypeError, statistics.mean_firing_rate, st,
t_start=pq.Quantity(0, 'ms'),
t_stop=pq.Quantity(10, 'ms'))
self.assertRaises(TypeError, statistics.mean_firing_rate, st,
t_start=pq.Quantity(0, 'ms'),
t_stop=10.)
self.assertRaises(TypeError, statistics.mean_firing_rate, st,
t_start=0.,
t_stop=pq.Quantity(10, 'ms'))
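# A minimal reference sketch for the plain 1-d array cases above. As the targets
# imply (e.g. 4 spikes / 1.23), the rate is the number of spikes inside
# [t_start, t_stop] divided by the window length, with t_start defaulting to 0 and
# t_stop to the largest spike time. The exact edge handling is assumed here; the
# tests themselves call statistics.mean_firing_rate() directly.
def _mean_firing_rate_reference(spike_times, t_start=0.0, t_stop=None):
    times = np.asarray(spike_times)
    if t_stop is None:
        t_stop = times.max()
    n_spikes = np.count_nonzero((times >= t_start) & (times <= t_stop))
    return n_spikes / (t_stop - t_start)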
class FanoFactorTestCase(unittest.TestCase):
def setUp(self):
np.random.seed(100)
num_st = 300
self.test_spiketrains = []
self.test_array = []
self.test_quantity = []
self.test_list = []
self.sp_counts = np.zeros(num_st)
for i in range(num_st):
r = np.random.rand(np.random.randint(20) + 1)
st = neo.core.SpikeTrain(r * pq.ms,
t_start=0.0 * pq.ms,
t_stop=20.0 * pq.ms)
self.test_spiketrains.append(st)
self.test_array.append(r)
self.test_quantity.append(r * pq.ms)
self.test_list.append(list(r))
# for cross-validation
self.sp_counts[i] = len(st)
def test_fanofactor_spiketrains(self):
# Test with list of spiketrains
self.assertEqual(
np.var(self.sp_counts) / np.mean(self.sp_counts),
statistics.fanofactor(self.test_spiketrains))
# One spiketrain in list
st = self.test_spiketrains[0]
self.assertEqual(statistics.fanofactor([st]), 0.0)
def test_fanofactor_empty(self):
# Test with empty list
self.assertTrue(np.isnan(statistics.fanofactor([])))
self.assertTrue(np.isnan(statistics.fanofactor([[]])))
# Test with empty quantity
self.assertTrue(np.isnan(statistics.fanofactor([] * pq.ms)))
# Empty spiketrain
st = neo.core.SpikeTrain([] * pq.ms, t_start=0 * pq.ms,
t_stop=1.5 * pq.ms)
self.assertTrue(np.isnan(statistics.fanofactor(st)))
def test_fanofactor_spiketrains_same(self):
# Test with same spiketrains in list
sts = [self.test_spiketrains[0]] * 3
self.assertEqual(statistics.fanofactor(sts), 0.0)
def test_fanofactor_array(self):
self.assertEqual(statistics.fanofactor(self.test_array),
np.var(self.sp_counts) / np.mean(self.sp_counts))
def test_fanofactor_array_same(self):
lst = [self.test_array[0]] * 3
self.assertEqual(statistics.fanofactor(lst), 0.0)
def test_fanofactor_quantity(self):
self.assertEqual(statistics.fanofactor(self.test_quantity),
np.var(self.sp_counts) / np.mean(self.sp_counts))
def test_fanofactor_quantity_same(self):
lst = [self.test_quantity[0]] * 3
self.assertEqual(statistics.fanofactor(lst), 0.0)
def test_fanofactor_list(self):
self.assertEqual(statistics.fanofactor(self.test_list),
np.var(self.sp_counts) / np.mean(self.sp_counts))
def test_fanofactor_list_same(self):
lst = [self.test_list[0]] * 3
self.assertEqual(statistics.fanofactor(lst), 0.0)
def test_fanofactor_different_durations(self):
st1 = neo.SpikeTrain([1, 2, 3] * pq.s, t_stop=4 * pq.s)
st2 = neo.SpikeTrain([1, 2, 3] * pq.s, t_stop=4.5 * pq.s)
self.assertWarns(UserWarning, statistics.fanofactor, (st1, st2))
def test_fanofactor_wrong_type(self):
# warn_tolerance is not a quantity
st1 = neo.SpikeTrain([1, 2, 3] * pq.s, t_stop=4 * pq.s)
self.assertRaises(TypeError, statistics.fanofactor, [st1],
warn_tolerance=1e-4)
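# A minimal reference sketch mirroring the cross-check kept in self.sp_counts above:
# the Fano factor is the variance of the per-train spike counts divided by their
# mean. Sketch only; the tests call statistics.fanofactor() directly.
def _fanofactor_reference(spiketrains):
    counts = np.asarray([len(st) for st in spiketrains], dtype=float)
    return np.var(counts) / np.mean(counts)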
class LVTestCase(unittest.TestCase):
def setUp(self):
self.test_seq = [1, 28, 4, 47, 5, 16, 2, 5, 21, 12,
4, 12, 59, 2, 4, 18, 33, 25, 2, 34,
4, 1, 1, 14, 8, 1, 10, 1, 8, 20,
5, 1, 6, 5, 12, 2, 8, 8, 2, 8,
2, 10, 2, 1, 1, 2, 15, 3, 20, 6,
11, 6, 18, 2, 5, 17, 4, 3, 13, 6,
1, 18, 1, 16, 12, 2, 52, 2, 5, 7,
6, 25, 6, 5, 3, 15, 4, 3, 16, 3,
6, 5, 24, 21, 3, 3, 4, 8, 4, 11,
5, 7, 5, 6, 8, 11, 33, 10, 7, 4]
self.target = 0.971826029994
def test_lv_with_quantities(self):
seq = pq.Quantity(self.test_seq, units='ms')
assert_array_almost_equal(statistics.lv(seq), self.target, decimal=9)
def test_lv_with_plain_array(self):
seq = np.array(self.test_seq)
assert_array_almost_equal(statistics.lv(seq), self.target, decimal=9)
def test_lv_with_list(self):
seq = self.test_seq
assert_array_almost_equal(statistics.lv(seq), self.target, decimal=9)
def test_lv_raise_error(self):
seq = self.test_seq
self.assertRaises(ValueError, statistics.lv, [])
self.assertRaises(ValueError, statistics.lv, 1)
self.assertRaises(ValueError, statistics.lv, np.array([seq, seq]))
def test_2short_spike_train(self):
seq = [1]
with self.assertWarns(UserWarning):
"""
Catches UserWarning: Input size is too small. Please provide
an input with more than 1 entry.
"""
self.assertTrue(math.isnan(statistics.lv(seq, with_nan=True)))
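# A minimal reference sketch of the statistic tested above, assuming the
# Shinomoto et al. (2003) definition Lv = 3/(n-1) * sum(((I_i - I_{i+1}) /
# (I_i + I_{i+1}))**2) that statistics.lv() is expected to implement; applied to
# the test sequence it should reproduce the target up to floating point.
# Sketch only; the tests call statistics.lv() directly.
def _lv_reference(intervals):
    v = np.asarray(intervals, dtype=float)
    return 3.0 * np.mean((np.diff(v) / (v[:-1] + v[1:])) ** 2)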
class LVRTestCase(unittest.TestCase):
def setUp(self):
self.test_seq = [1, 28, 4, 47, 5, 16, 2, 5, 21, 12,
4, 12, 59, 2, 4, 18, 33, 25, 2, 34,
4, 1, 1, 14, 8, 1, 10, 1, 8, 20,
5, 1, 6, 5, 12, 2, 8, 8, 2, 8,
2, 10, 2, 1, 1, 2, 15, 3, 20, 6,
11, 6, 18, 2, 5, 17, 4, 3, 13, 6,
1, 18, 1, 16, 12, 2, 52, 2, 5, 7,
6, 25, 6, 5, 3, 15, 4, 3, 16, 3,
6, 5, 24, 21, 3, 3, 4, 8, 4, 11,
5, 7, 5, 6, 8, 11, 33, 10, 7, 4]
self.target = 2.1845363464753134
def test_lvr_with_quantities(self):
seq = pq.Quantity(self.test_seq, units='ms')
assert_array_almost_equal(statistics.lvr(seq), self.target, decimal=9)
def test_lvr_with_plain_array(self):
seq = np.array(self.test_seq)
assert_array_almost_equal(statistics.lvr(seq), self.target, decimal=9)
def test_lvr_with_list(self):
seq = self.test_seq
assert_array_almost_equal(statistics.lvr(seq), self.target, decimal=9)
def test_lvr_raise_error(self):
seq = self.test_seq
self.assertRaises(ValueError, statistics.lvr, [])
self.assertRaises(ValueError, statistics.lvr, 1)
self.assertRaises(ValueError, statistics.lvr, np.array([seq, seq]))
self.assertRaises(ValueError, statistics.lvr, seq, -1 * pq.ms)
def test_lvr_refractoriness_kwarg(self):
seq = np.array(self.test_seq)
with self.assertWarns(UserWarning):
assert_array_almost_equal(statistics.lvr(seq, R=5),
self.target, decimal=9)
def test_2short_spike_train(self):
seq = [1]
with self.assertWarns(UserWarning):
"""
Catches UserWarning: Input size is too small. Please provide
an input with more than 1 entry.
"""
self.assertTrue(math.isnan(statistics.lvr(seq, with_nan=True)))
class CV2TestCase(unittest.TestCase):
def setUp(self):
self.test_seq = [1, 28, 4, 47, 5, 16, 2, 5, 21, 12,
4, 12, 59, 2, 4, 18, 33, 25, 2, 34,
4, 1, 1, 14, 8, 1, 10, 1, 8, 20,
5, 1, 6, 5, 12, 2, 8, 8, 2, 8,
2, 10, 2, 1, 1, 2, 15, 3, 20, 6,
11, 6, 18, 2, 5, 17, 4, 3, 13, 6,
1, 18, 1, 16, 12, 2, 52, 2, 5, 7,
6, 25, 6, 5, 3, 15, 4, 3, 16, 3,
6, 5, 24, 21, 3, 3, 4, 8, 4, 11,
5, 7, 5, 6, 8, 11, 33, 10, 7, 4]
self.target = 1.0022235296529176
def test_cv2_with_quantities(self):
seq = pq.Quantity(self.test_seq, units='ms')
assert_array_almost_equal(statistics.cv2(seq), self.target, decimal=9)
def test_cv2_with_plain_array(self):
seq = np.array(self.test_seq)
assert_array_almost_equal(statistics.cv2(seq), self.target, decimal=9)
def test_cv2_with_list(self):
seq = self.test_seq
assert_array_almost_equal(statistics.cv2(seq), self.target, decimal=9)
def test_cv2_raise_error(self):
seq = self.test_seq
self.assertRaises(ValueError, statistics.cv2, [])
self.assertRaises(ValueError, statistics.cv2, 1)
self.assertRaises(ValueError, statistics.cv2, np.array([seq, seq]))
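# A minimal reference sketch of the statistic tested above, assuming the
# Holt et al. (1996) definition CV2 = mean(2 * |I_{i+1} - I_i| / (I_{i+1} + I_i))
# that statistics.cv2() is expected to implement; applied to the test sequence it
# should reproduce the target up to floating point. Sketch only.
def _cv2_reference(intervals):
    v = np.asarray(intervals, dtype=float)
    return np.mean(2.0 * np.abs(np.diff(v)) / (v[:-1] + v[1:]))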
class InstantaneousRateTest(unittest.TestCase):
def setUp(self):
# create a poisson spike train:
self.st_tr = (0, 20.0) # seconds
self.st_dur = self.st_tr[1] - self.st_tr[0] # seconds
self.st_margin = 5.0 # seconds
self.st_rate = 10.0 # Hertz
np.random.seed(19)
duration_effective = self.st_dur - 2 * self.st_margin
st_num_spikes = np.random.poisson(self.st_rate * duration_effective)
spike_train = sorted(
np.random.rand(st_num_spikes) *
duration_effective +
self.st_margin)
# convert spike train into neo objects
self.spike_train = neo.SpikeTrain(spike_train * pq.s,
t_start=self.st_tr[0] * pq.s,
t_stop=self.st_tr[1] * pq.s)
# generation of a multiply used specific kernel
self.kernel = kernels.TriangularKernel(sigma=0.03 * pq.s)
def test_instantaneous_rate_and_warnings(self):
st = self.spike_train
sampling_period = 0.01 * pq.s
with self.assertWarns(UserWarning):
# Catches warning: The width of the kernel was adjusted to a
# minimally allowed width.
inst_rate = statistics.instantaneous_rate(
st, sampling_period, self.kernel, cutoff=0)
self.assertIsInstance(inst_rate, neo.core.AnalogSignal)
self.assertEqual(
inst_rate.sampling_period.simplified, sampling_period.simplified)
self.assertEqual(inst_rate.simplified.units, pq.Hz)
self.assertEqual(inst_rate.t_stop.simplified, st.t_stop.simplified)
self.assertEqual(inst_rate.t_start.simplified, st.t_start.simplified)
def test_error_instantaneous_rate(self):
self.assertRaises(
TypeError, statistics.instantaneous_rate,
spiketrains=[1, 2, 3] * pq.s,
sampling_period=0.01 * pq.ms, kernel=self.kernel)
self.assertRaises(
TypeError, statistics.instantaneous_rate, spiketrains=[1, 2, 3],
sampling_period=0.01 * pq.ms, kernel=self.kernel)
st = self.spike_train
self.assertRaises(
TypeError, statistics.instantaneous_rate, spiketrains=st,
sampling_period=0.01, kernel=self.kernel)
self.assertRaises(
ValueError, statistics.instantaneous_rate, spiketrains=st,
sampling_period=-0.01 * pq.ms, kernel=self.kernel)
self.assertRaises(
TypeError, statistics.instantaneous_rate, spiketrains=st,
sampling_period=0.01 * pq.ms, kernel='NONE')
self.assertRaises(TypeError, statistics.instantaneous_rate,
self.spike_train,
sampling_period=0.01 * pq.s, kernel='wrong_string',
t_start=self.st_tr[0] * pq.s,
t_stop=self.st_tr[1] * pq.s,
trim=False)
self.assertRaises(
TypeError, statistics.instantaneous_rate, spiketrains=st,
sampling_period=0.01 * pq.ms, kernel=self.kernel,
cutoff=20 * pq.ms)
self.assertRaises(
TypeError, statistics.instantaneous_rate, spiketrains=st,
sampling_period=0.01 * pq.ms, kernel=self.kernel, t_start=2)
self.assertRaises(
TypeError, statistics.instantaneous_rate, spiketrains=st,
sampling_period=0.01 * pq.ms, kernel=self.kernel,
t_stop=20 * pq.mV)
self.assertRaises(
TypeError, statistics.instantaneous_rate, spiketrains=st,
sampling_period=0.01 * pq.ms, kernel=self.kernel, trim=1)
# cannot estimate a kernel for a list of spiketrains
self.assertRaises(ValueError, statistics.instantaneous_rate,
spiketrains=[st, st], sampling_period=10 * pq.ms,
kernel='auto')
def test_rate_estimation_consistency(self):
"""
Test, whether the integral of the rate estimation curve is (almost)
equal to the number of spikes of the spike train.
"""
kernel_types = tuple(
kern_cls for kern_cls in kernels.__dict__.values()
if isinstance(kern_cls, type) and
issubclass(kern_cls, kernels.Kernel) and
kern_cls is not kernels.Kernel and
kern_cls is not kernels.SymmetricKernel)
kernels_available = [kern_cls(sigma=0.5 * pq.s, invert=False)
for kern_cls in kernel_types]
kernels_available.append('auto')
kernel_resolution = 0.01 * pq.s
for kernel in kernels_available:
for center_kernel in (False, True):
rate_estimate = statistics.instantaneous_rate(
self.spike_train,
sampling_period=kernel_resolution,
kernel=kernel,
t_start=self.st_tr[0] * pq.s,
t_stop=self.st_tr[1] * pq.s,
trim=False,
center_kernel=center_kernel)
num_spikes = len(self.spike_train)
auc = spint.cumtrapz(
y=rate_estimate.magnitude.squeeze(),
x=rate_estimate.times.simplified.magnitude)[-1]
self.assertAlmostEqual(num_spikes, auc,
delta=0.01 * num_spikes)
def test_not_center_kernel(self):
# issue 107
t_spike = 1 * pq.s
st = neo.SpikeTrain([t_spike], t_start=0 * pq.s, t_stop=2 * pq.s,
units=pq.s)
kernel = kernels.AlphaKernel(200 * pq.ms)
fs = 0.1 * pq.ms
rate = statistics.instantaneous_rate(st,
sampling_period=fs,
kernel=kernel,
center_kernel=False)
rate_nonzero_index = np.nonzero(rate > 1e-6)[0]
# where the mass is concentrated
rate_mass = rate.times.rescale(t_spike.units)[rate_nonzero_index]
all_after_response_onset = (rate_mass >= t_spike).all()
self.assertTrue(all_after_response_onset)
def test_regression_288(self):
np.random.seed(9)
sampling_period = 200 * pq.ms
spiketrain = homogeneous_poisson_process(10 * pq.Hz,
t_start=0 * pq.s,
t_stop=10 * pq.s)
kernel = kernels.AlphaKernel(sigma=5 * pq.ms, invert=True)
# check that instantaneous_rate "works" for kernels with small sigma
# without triggering an incomprehensible error
rate = statistics.instantaneous_rate(spiketrain,
sampling_period=sampling_period,
kernel=kernel)
self.assertEqual(
len(rate), (spiketrain.t_stop / sampling_period).simplified.item())
def test_small_kernel_sigma(self):
# Test that the instantaneous rate is overestimated when
# kernel.sigma << sampling_period and center_kernel is True.
# The setup is set to match the issue 288.
np.random.seed(9)
sampling_period = 200 * pq.ms
sigma = 5 * pq.ms
rate_expected = 10 * pq.Hz
spiketrain = homogeneous_poisson_process(rate_expected,
t_start=0 * pq.s,
t_stop=10 * pq.s)
kernel_types = tuple(
kern_cls for kern_cls in kernels.__dict__.values()
if isinstance(kern_cls, type) and
issubclass(kern_cls, kernels.Kernel) and
kern_cls is not kernels.Kernel and
kern_cls is not kernels.SymmetricKernel)
for kern_cls, invert in itertools.product(kernel_types, (False, True)):
kernel = kern_cls(sigma=sigma, invert=invert)
with self.subTest(kernel=kernel):
rate = statistics.instantaneous_rate(
spiketrain,
sampling_period=sampling_period,
kernel=kernel, center_kernel=True)
self.assertGreater(rate.mean(), rate_expected)
def test_spikes_on_edges(self):
# this test demonstrates that trimming (convolve "valid" mode)
# removes the edge spikes, underestimating the true firing rate, and
# is thus unable to reconstruct the number of spikes in a
# spiketrain (see test_rate_estimation_consistency)
cutoff = 5
sampling_period = 0.01 * pq.s
t_spikes = np.array([-cutoff, cutoff]) * pq.s
spiketrain = neo.SpikeTrain(t_spikes, t_start=t_spikes[0],
t_stop=t_spikes[-1])
kernel_types = tuple(
kern_cls for kern_cls in kernels.__dict__.values()
if isinstance(kern_cls, type) and
issubclass(kern_cls, kernels.Kernel) and
kern_cls is not kernels.Kernel and
kern_cls is not kernels.SymmetricKernel)
kernels_available = [kern_cls(sigma=1 * pq.s, invert=False)
for kern_cls in kernel_types]
for kernel in kernels_available:
for center_kernel in (False, True):
rate = statistics.instantaneous_rate(
spiketrain,
sampling_period=sampling_period,
kernel=kernel,
cutoff=cutoff, trim=True,
center_kernel=center_kernel)
assert_array_almost_equal(rate.magnitude, 0, decimal=3)
def test_trim_as_convolve_mode(self):
cutoff = 5
sampling_period = 0.01 * pq.s
t_spikes = np.linspace(-cutoff, cutoff, num=(2 * cutoff + 1)) * pq.s
spiketrain = neo.SpikeTrain(t_spikes, t_start=t_spikes[0],
t_stop=t_spikes[-1])
kernel = kernels.RectangularKernel(sigma=1 * pq.s)
assert cutoff > kernel.min_cutoff, "Choose larger cutoff"
kernel_types = tuple(
kern_cls for kern_cls in kernels.__dict__.values()
if isinstance(kern_cls, type) and
issubclass(kern_cls, kernels.SymmetricKernel) and
kern_cls is not kernels.SymmetricKernel)
kernels_symmetric = [kern_cls(sigma=1 * pq.s, invert=False)
for kern_cls in kernel_types]
for kernel in kernels_symmetric:
for trim in (False, True):
rate_centered = statistics.instantaneous_rate(
spiketrain, sampling_period=sampling_period,
kernel=kernel, cutoff=cutoff, trim=trim)
rate_convolve = statistics.instantaneous_rate(
spiketrain, sampling_period=sampling_period,
kernel=kernel, cutoff=cutoff, trim=trim,
center_kernel=False)
assert_array_almost_equal(rate_centered, rate_convolve)
def test_instantaneous_rate_spiketrainlist(self):
np.random.seed(19)
duration_effective = self.st_dur - 2 * self.st_margin
st_num_spikes = np.random.poisson(self.st_rate * duration_effective)
spike_train2 = sorted(
np.random.rand(st_num_spikes) *
duration_effective +
self.st_margin)
spike_train2 = neo.SpikeTrain(spike_train2 * pq.s,
t_start=self.st_tr[0] * pq.s,
t_stop=self.st_tr[1] * pq.s)
st_rate_1 = statistics.instantaneous_rate(self.spike_train,
sampling_period=0.01 * pq.s,
kernel=self.kernel)
st_rate_2 = statistics.instantaneous_rate(spike_train2,
sampling_period=0.01 * pq.s,
kernel=self.kernel)
combined_rate = statistics.instantaneous_rate(
[self.spike_train, spike_train2],
sampling_period=0.01 * pq.s,
kernel=self.kernel)
rate_concat = np.c_[st_rate_1, st_rate_2]
# 'time_vector.dtype' in instantaneous_rate() is changed from float64
# to float32, which results in a ~3e-6 absolute difference
assert_array_almost_equal(combined_rate.magnitude,
rate_concat.magnitude, decimal=5)
# Regression test for #144
def test_instantaneous_rate_regression_144(self):
# The following spike train contains spikes that are so close to each
# other, that the optimal kernel cannot be detected. Therefore, the
# function should react with a ValueError.
st = neo.SpikeTrain([2.12, 2.13, 2.15] * pq.s, t_stop=10 * pq.s)
self.assertRaises(ValueError, statistics.instantaneous_rate, st,
1 * pq.ms)
# Regression test for #245
def test_instantaneous_rate_regression_245(self):
# This test makes sure that the correct kernel width is chosen when
# selecting 'auto' as kernel
spiketrain = neo.SpikeTrain(
range(1, 30) * pq.ms, t_start=0 * pq.ms, t_stop=30 * pq.ms)
# This is the correct procedure to obtain the kernel: first,
# optimal_kernel_bandwidth() (the successor of sskernel) estimates the
# bandwidth of an optimal Gaussian kernel in terms of its standard
# deviation sigma; this value is then used directly to create the
# Gaussian kernel
kernel_width_sigma = statistics.optimal_kernel_bandwidth(
spiketrain.magnitude, times=None, bootstrap=False)['optw']
kernel = kernels.GaussianKernel(kernel_width_sigma * spiketrain.units)
result_target = statistics.instantaneous_rate(
spiketrain, 10 * pq.ms, kernel=kernel)
# Here, we check whether the 'auto' argument leads to the same operation.
# In the regression, it was incorrectly assumed that the sskernel()
# function returns the actual bandwidth of the kernel, which is defined
# as approximately bandwidth = sigma * 5.5 = sigma * (2 * 2.75):
# the factor 2.0 connects the kernel width with its half width, and the
# factor 2.75 connects the half width of a Gaussian distribution holding
# ~99% of its probability mass with its standard deviation.
result_automatic = statistics.instantaneous_rate(
spiketrain, 10 * pq.ms, kernel='auto')
assert_array_almost_equal(result_target, result_automatic)
def test_instantaneous_rate_grows_with_sampling_period(self):
np.random.seed(0)
rate_expected = 10 * pq.Hz
spiketrain = homogeneous_poisson_process(rate=rate_expected,
t_stop=10 * pq.s)
kernel = kernels.GaussianKernel(sigma=100 * pq.ms)
rates_mean = []
for sampling_period in np.linspace(1, 1000, num=10) * pq.ms:
with self.subTest(sampling_period=sampling_period):
rate = statistics.instantaneous_rate(
spiketrain,
sampling_period=sampling_period,
kernel=kernel)
rates_mean.append(rate.mean())
# rate means are greater or equal the expected rate
assert_array_less(rate_expected, rates_mean)
# check sorted
self.assertTrue(np.all(rates_mean[:-1] < rates_mean[1:]))
# Regression test for #360
def test_centered_at_origin(self):
# Skip RectangularKernel because it doesn't have a strong peak.
kernel_types = tuple(
kern_cls for kern_cls in kernels.__dict__.values()
if isinstance(kern_cls, type) and
issubclass(kern_cls, kernels.SymmetricKernel) and
kern_cls not in (kernels.SymmetricKernel,
kernels.RectangularKernel))
kernels_symmetric = [kern_cls(sigma=50 * pq.ms, invert=False)
for kern_cls in kernel_types]
# first part: a symmetric spiketrain with a symmetric kernel
spiketrain = neo.SpikeTrain(np.array([-0.0001, 0, 0.0001]) * pq.s,
t_start=-1,
t_stop=1)
for kernel in kernels_symmetric:
rate = statistics.instantaneous_rate(spiketrain,
sampling_period=20 * pq.ms,
kernel=kernel)
# the peak time must be centered at origin
self.assertEqual(rate.times[np.argmax(rate)], 0)
# second part: a single spike at t=0
periods = [2 ** c for c in range(-3, 6)]
for period in periods:
with self.subTest(period=period):
spiketrain = neo.SpikeTrain(np.array([0]) * pq.s,
t_start=-period * 10 * pq.ms,
t_stop=period * 10 * pq.ms)
for kernel in kernels_symmetric:
rate = statistics.instantaneous_rate(
spiketrain,
sampling_period=period * pq.ms,
kernel=kernel)
self.assertEqual(rate.times[np.argmax(rate)], 0)
def test_annotations(self):
spiketrain = neo.SpikeTrain([1, 2], t_stop=2 * pq.s, units=pq.s)
kernel = kernels.AlphaKernel(sigma=100 * pq.ms)
rate = statistics.instantaneous_rate(spiketrain,
sampling_period=10 * pq.ms,
kernel=kernel)
kernel_annotation = dict(type=type(kernel).__name__,
sigma=str(kernel.sigma),
invert=kernel.invert)
self.assertIn('kernel', rate.annotations)
self.assertEqual(rate.annotations['kernel'], kernel_annotation)
class TimeHistogramTestCase(unittest.TestCase):
def setUp(self):
self.spiketrain_a = neo.SpikeTrain(
[0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
self.spiketrain_b = neo.SpikeTrain(
[0.1, 0.7, 1.2, 2.2, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
self.spiketrains = [self.spiketrain_a, self.spiketrain_b]
def tearDown(self):
del self.spiketrain_a
self.spiketrain_a = None
del self.spiketrain_b
self.spiketrain_b = None
def test_time_histogram(self):
targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0])
histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s)
assert_array_equal(targ, histogram.magnitude[:, 0])
def test_time_histogram_binary(self):
targ = np.array([2, 2, 1, 1, 2, 2, 1, 0, 1, 0])
histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s,
binary=True)
assert_array_equal(targ, histogram.magnitude[:, 0])
def test_time_histogram_tstart_tstop(self):
# Start, stop short range
targ = np.array([2, 1])
histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s,
t_start=5 * pq.s,
t_stop=7 * pq.s)
assert_array_equal(targ, histogram.magnitude[:, 0])
# Test without t_stop
targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0])
histogram = statistics.time_histogram(self.spiketrains,
bin_size=1 * pq.s,
t_start=0 * pq.s)
assert_array_equal(targ, histogram.magnitude[:, 0])
# Test without t_start
histogram = statistics.time_histogram(self.spiketrains,
bin_size=1 * pq.s,
t_stop=10 * pq.s)
assert_array_equal(targ, histogram.magnitude[:, 0])
def test_time_histogram_output(self):
# Normalization mean
histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s,
output='mean')
targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0], dtype=float) / 2
assert_array_equal(targ.reshape(targ.size, 1), histogram.magnitude)
# Normalization rate
histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s,
output='rate')
assert_array_equal(histogram.view(pq.Quantity),
targ.reshape(targ.size, 1) * 1 / pq.s)
# Normalization unspecified, raises error
self.assertRaises(ValueError, statistics.time_histogram,
self.spiketrains,
bin_size=pq.s, output=' ')
def test_annotations(self):
np.random.seed(1)
spiketrains = [homogeneous_poisson_process(
rate=10 * pq.Hz, t_stop=10 * pq.s) for _ in range(10)]
for output in ("counts", "mean", "rate"):
histogram = statistics.time_histogram(spiketrains,
bin_size=3 * pq.ms,
output=output)
self.assertIn('normalization', histogram.annotations)
self.assertEqual(histogram.annotations['normalization'], output)
class ComplexityPdfTestCase(unittest.TestCase):
def setUp(self):
self.spiketrain_a = neo.SpikeTrain(
[0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
self.spiketrain_b = neo.SpikeTrain(
[0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
self.spiketrain_c = neo.SpikeTrain(
[0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
self.spiketrains = [
self.spiketrain_a, self.spiketrain_b, self.spiketrain_c]
def tearDown(self):
del self.spiketrain_a
self.spiketrain_a = None
del self.spiketrain_b
self.spiketrain_b = None
def test_complexity_pdf(self):
targ = np.array([0.92, 0.01, 0.01, 0.06])
complexity = statistics.complexity_pdf(self.spiketrains,
bin_size=0.1 * pq.s)
assert_array_equal(targ, complexity.magnitude[:, 0])
self.assertEqual(1, complexity.magnitude[:, 0].sum())
self.assertEqual(len(self.spiketrains) + 1, len(complexity))
self.assertIsInstance(complexity, neo.AnalogSignal)
self.assertEqual(complexity.units, 1 * pq.dimensionless)
if __name__ == '__main__':
unittest.main()
|
JuliaSprenger/elephant
|
elephant/test/test_statistics.py
|
Python
|
bsd-3-clause
| 42,632
|
[
"Gaussian"
] |
66f9d021dd48574ea7bc29c88a2bd2a290280e607f620158fc2f2cbaa6fecd81
|
import time
import random
import numpy as np
import pypolyagamma as pypolyagamma
def calculate_C_w(S, w_i):
w_mat = np.diag(w_i)
return np.dot(S.T, np.dot(w_mat, S))
def sample_w_i(S, J_i):
"""
:param S: observation matrix
:param J_i: neuron i's couplings
:return: samples for w_i from a polyagamma distribution
"""
nthreads = pypolyagamma.get_omp_num_threads()
seeds = np.random.randint(2 ** 16, size=nthreads)
ppgs = [pypolyagamma.PyPolyaGamma(seed) for seed in seeds]
T = S.shape[0]
A = np.ones(T)
w_i = np.zeros(T)
# print 'will sample w'
# print nthreads
# ppg.pgdrawv(A, 2. * np.dot(S, J_i), w_i)
pypolyagamma.pgdrawvpar(ppgs, A, np.dot(S, J_i), w_i)
# print 'sampled w'
return w_i
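# Illustrative usage sketch (added; not part of the original script). The
# sizes below (T=100 time bins, N=5 neurons) are arbitrary assumptions.
def _demo_sample_w_i():
    S_demo = np.random.binomial(1, 0.3, size=(100, 5)).astype(float)
    J_demo = np.random.normal(0., 1., size=5)
    # One Polya-Gamma auxiliary variable is drawn per time bin for neuron i.
    return sample_w_i(S_demo, J_demo)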
def sample_J_i(S, C, D_i, w_i, gamma_i, sigma_J):
N = S.shape[1]
J_i = np.zeros(N)
included_ind = list(np.where(gamma_i > 0)[0])
if len(included_ind) == 0:
return J_i
cov_mat = (1. / sigma_J) * np.identity(N)
C_gamma = C[:, included_ind][included_ind, :]
cov_mat_gamma = cov_mat[included_ind, :][:, included_ind]
D_i_gamma = D_i[included_ind]
cov = np.linalg.inv(C_gamma + cov_mat_gamma)
mean = np.dot(cov, D_i_gamma)
J_i_gamma = np.random.multivariate_normal(mean, cov)
J_i[included_ind] = J_i_gamma
return J_i
def calc_block_dets(C_gamma, j_rel, sigma_J, num_active):
cov_mat = (1. / sigma_J) * np.identity(num_active)
mat = cov_mat + C_gamma
A = mat[:j_rel, :j_rel]
B_1 = mat[:j_rel, j_rel:]
C_1 = mat[j_rel:, :j_rel]
D_1 = mat[j_rel:, j_rel:]
B_0 = mat[:j_rel, j_rel + 1:]
C_0 = mat[j_rel + 1:, :j_rel]
D_0 = mat[j_rel + 1:, j_rel + 1:]
det_cov_1 = float(num_active) * sigma_J
det_cov_0 = float(num_active - 1) * sigma_J
# import ipdb;ipdb.set_trace()
# If the matrix is small don't bother to split
if mat.shape[0] < 5.:
pre_factor_1 = (det_cov_1 / np.linalg.det(mat))
pre_factor_0 = (det_cov_0 / np.linalg.det(np.delete(np.delete(mat, j_rel, 0), j_rel, 1)))
elif j_rel == 0:
pre_factor_0 = (det_cov_0 / np.linalg.det(D_0))
pre_factor_1 = (det_cov_1 / np.linalg.det(mat))
elif j_rel == num_active - 1:
pre_factor_0 = (det_cov_0 / np.linalg.det(A))
pre_factor_1 = (det_cov_1 / np.linalg.det(mat))
else:
det_A = np.linalg.det(A)
A_inv = np.linalg.inv(A)
pre_factor_0 = det_cov_0 / (det_A * np.linalg.det(D_0 - np.dot(C_0, np.dot(A_inv, B_0))))
pre_factor_1 = det_cov_1 / (det_A * np.linalg.det(D_1 - np.dot(C_1, np.dot(A_inv, B_1))))
return np.sqrt(pre_factor_0), np.sqrt(pre_factor_1)
def calc_gamma_prob(sigma_J, C_gamma, D_i_gamma, ro, j_rel):
# import ipdb; ipdb.set_trace()
num_active = D_i_gamma.shape[0]  # How many gammas are equal to 1
cov_mat = 1. / sigma_J * np.identity(num_active)
mat = cov_mat + C_gamma
mat_inv = np.linalg.inv(mat)
mat_0_inv = np.linalg.inv(np.delete(np.delete(mat, j_rel, 0), j_rel, 1))
D_i_gamma_0 = np.delete(D_i_gamma, j_rel)
# calculate determinant with and without j in block form
prefactor_0, prefactor_1 = calc_block_dets(C_gamma, j_rel, sigma_J, num_active)
# prefactor_1 = np.sqrt(np.linalg.det(mat_inv) * np.linalg.det(cov_mat))
# prefactor_0 = np.sqrt(np.linalg.det(mat_0_inv) * np.linalg.det(np.delete(np.delete(cov_mat, j_rel, 0), j_rel, 1)))
sq_1 = 0.5 * np.dot(D_i_gamma.T, np.dot(mat_inv, D_i_gamma))
sq_0 = 0.5 * np.dot(D_i_gamma_0.T, np.dot(mat_0_inv, D_i_gamma_0))
new_ro = 1. / (1. + np.exp(sq_0 - sq_1 + np.log(1. - ro) - np.log(ro) +
np.log(prefactor_0) - np.log(prefactor_1)))
return new_ro
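# Explanatory note (added; not in the original): new_ro is the conditional
# posterior probability p(gamma_j = 1 | rest). It is a logistic function of
# the log-odds made of three parts: the difference of the quadratic forms
# sq_1 - sq_0 (marginal likelihood with vs. without connection j, after
# integrating J out of the Gaussian slab), the log prior odds
# log(ro / (1 - ro)), and the log ratio of the determinant prefactors
# returned by calc_block_dets.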
def sample_gamma_i(gamma_i, D_i, C, ro, sigma_J):
N = C.shape[0]
for j in range(N):
# import ipdb; ipdb.set_trace()
gamma_i[j] = 1.
active_indices = np.where(gamma_i > 0)[0]
# Don't allow a network with 0 connections
if len(active_indices) == 1.:
continue
j_rel = j - np.where(gamma_i[:j] == 0)[0].shape[0]
D_i_gamma = D_i[active_indices]
C_gamma = C[:, active_indices][active_indices, :]
new_ro = calc_gamma_prob(sigma_J, C_gamma, D_i_gamma, ro, j_rel)
# import ipdb; ipdb.set_trace()
# try:
gamma_i[j] = np.random.binomial(1, new_ro, 1)
# except ValueError:
# import ipdb;
# ipdb.set_trace()
return gamma_i
def sample_neuron(samp_num, burnin, sigma_J, S, D_i, ro, thin=0, save_all=True):
""" This function uses the Gibbs sampler to sample from w, gamma and J
:param samp_num: Number of samples to be drawn
:param burnin: Number of samples to burn in
:param sigma_J: variance of the J slab
:param S: Neurons' activity matrix. Including S0. (T + 1) x N
:param C: observation correlation matrix. N x N
:param D_i: time delay correlations of neuron i. N
:return: samp_num samples (each one of length K (time_steps)) from the posterior distribution for w,x,z.
"""
# random.seed(seed)
T, N = S.shape
# actual number of samples needed with thinning and burn-in
if (thin != 0):
N_s = samp_num * thin + burnin
else:
N_s = samp_num + burnin
samples_w_i = np.zeros((N_s, T), dtype=np.float32)
samples_J_i = np.zeros((N_s, N), dtype=np.float32)
samples_gamma_i = np.zeros((N_s, N), dtype=np.float32)
# gamma_i = np.random.binomial(1, ro, N)
gamma_i = np.ones(N)
J_i = np.multiply(gamma_i, np.random.normal(0, sigma_J, N))
for i in xrange(N_s):
# import ipdb; ipdb.set_trace()
w_i = sample_w_i(S, J_i)
C_w_i = calculate_C_w(S, w_i)
gamma_i = sample_gamma_i(gamma_i, D_i, C_w_i, ro, sigma_J)
J_i = sample_J_i(S, C_w_i, D_i, w_i, gamma_i, sigma_J)
samples_w_i[i, :] = w_i
samples_J_i[i, :] = J_i
samples_gamma_i[i, :] = gamma_i
if thin == 0:
return samples_w_i[burnin:, :], samples_J_i[burnin:, :], samples_gamma_i[burnin:, :]
else:
return samples_w_i[burnin:N_s:thin, :], samples_J_i[burnin:N_s:thin, :], \
samples_gamma_i[burnin:N_s:thin, :]
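# Illustrative usage sketch (added; not part of the original script). The
# sizes, prior ro and sigma_J below are arbitrary assumptions, chosen only
# to show the expected shapes of the returned arrays.
def _demo_sample_neuron():
    T, N = 200, 5
    S_demo = np.random.binomial(1, 0.3, size=(T, N)).astype(float)
    D_demo = np.random.normal(0., 1., size=N)
    w_s, J_s, gamma_s = sample_neuron(samp_num=50, burnin=10, sigma_J=1.0,
                                      S=S_demo, D_i=D_demo, ro=0.3)
    # w_s: (50, T) auxiliary variables, J_s: (50, N) couplings,
    # gamma_s: (50, N) inclusion indicators for neuron i.
    return w_s, J_s, gamma_s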
|
noashin/Ising_model_gibbs_sampler
|
sampler.py
|
Python
|
mit
| 6,253
|
[
"NEURON"
] |
cd55b48281ce401b60ae9caf7c1118b3d114e31a0ec829cfcac169291e7df6f6
|
# -*- coding: utf-8 -*-
"""
A replication of
Chaudhuri, Rishidev, et al.
"A large-scale circuit mechanism for hierarchical dynamical processing in the primate cortex."
Neuron 88.2 (2015): 419-431.
Figure 7D cannot be reproduced exactly because the parameters used to generate
the figure in the original paper are of higher precision than the parameters reported.
"""
from __future__ import division
import pickle
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import least_squares
def F(I, a=0.27, b=108., d=0.154):
"""F(I) for vector I"""
return (a*I - b)/(1.-np.exp(-d*(a*I - b)))
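# Worked example (added note, not in the original): with the defaults
# a=0.27, b=108, d=0.154, an input of I = 500 pA gives a*I - b = 27, so
# F = 27 / (1 - exp(-0.154 * 27)) ~= 27.4 Hz. At a*I = b the expression is
# 0/0 with limit 1/d ~= 6.5 Hz, so callers should avoid hitting that point
# exactly with scalar inputs.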
class Model(object):
"""
A model of multiple interacting brain areas from Chaudhuri et al. Neuron 2015
"""
def __init__(self, datafile='subgraph_data.pkl', ext_params={}):
#---------------------------------------------------------------------------------
# Loading Anatomical Data from Kennedy's group
#---------------------------------------------------------------------------------
with open(datafile,'rb') as f:
p = pickle.load(f)
print 'Initializing Model. From ' + datafile + ' load:',
print p.keys()
p['hier_vals'] = p['hier_vals']/max(p['hier_vals']) # Normalized between 0 and 1
p['n_area'] = len(p['areas'])
#---------------------------------------------------------------------------------
# Network Parameters
#---------------------------------------------------------------------------------
p['beta_inh'] = 0.351 # Hz/pA
p['tau_exc'] = 60 # ms
p['tau_inh'] = 10 # ms
p['wEE'] = 250.2 # pA
p['wIE'] = 303.9 # pA
p['wEI'] = 8.11 # pA/Hz
p['wII'] = 12.5 # pA/Hz
p['muEE'] = 125.1 # pA/Hz
p['eta'] = 3.4
p['gamma'] = 0.641
for key, value in ext_params.iteritems():
p[key] = value
self.fI = lambda x : x*(x>0) # f-I curve
p['exc_scale'] = (1+p['eta']*p['hier_vals'])
self.p = p
def run_stimulus(self, plotfile='ReplicateChaudhuri2015_Fig3A.pdf'):
area_act = 'V1'
print 'Running network with stimulation to ' + area_act
#---------------------------------------------------------------------------------
# Redefine Parameters
#---------------------------------------------------------------------------------
p = self.p
# Definition of combined parameters
local_EE = p['wEE'] * p['exc_scale']
local_EI = -p['wEI']
local_IE = p['beta_inh'] * p['wIE'] * p['exc_scale']
local_II = -p['beta_inh'] * p['wII']
fln_scaled = (p['exc_scale'] * p['fln_mat'].T).T
#---------------------------------------------------------------------------------
# Simulation Parameters
#---------------------------------------------------------------------------------
dt = 0.2 # ms
T = 2500 # ms
t_plot = np.linspace(0, T, int(T/dt)+1)
n_t = len(t_plot)
I_bkg_exc = 400.0
I_bkg_inh = 61.76
# Solve for baseline gating variable and firing rates.
def _solver(s_n):
r_inh = p['beta_inh'] * (p['exc_scale'] * p['wIE'] *
s_n + I_bkg_inh / p['beta_inh']) / (1 + p['beta_inh']
* p['wII'])
longrange_E = np.dot(fln_scaled,s_n)
r_exc = F(local_EE * s_n + local_EI * r_inh + p['muEE'] * longrange_E +
I_bkg_exc)
return s_n - p['gamma'] * (p['tau_exc'] /
1000) * r_exc / (1 + p['gamma'] * (p['tau_exc'] /
1000) * r_exc)
x0 = np.ones(p['n_area']) * 0.05
s_n_tgt = least_squares(_solver, x0, bounds=(np.zeros(p['n_area']),
np.ones(p['n_area'])))['x']
r_inh_tgt = p['beta_inh'] * (p['exc_scale'] * p['wIE'] *
s_n_tgt + I_bkg_inh /
p['beta_inh']) / (1 + p['beta_inh'] * p['wII'])
longrange_E = np.dot(fln_scaled,s_n_tgt)
I_exc = local_EE*s_n_tgt + local_EI*r_inh_tgt + \
p['muEE'] * longrange_E + I_bkg_exc
r_exc_tgt = F(I_exc)
# Set stimulus input
I_stim_exc = np.zeros((n_t,p['n_area']))
area_stim_idx = p['areas'].index(area_act) # Index of stimulated area
time_idx = (t_plot>100) & (t_plot<=350)
I_stim_exc[time_idx, area_stim_idx] = 200.0
#---------------------------------------------------------------------------------
# Storage
#---------------------------------------------------------------------------------
s_n = np.zeros((n_t,p['n_area']))
r_exc = np.zeros((n_t,p['n_area']))
r_inh = np.zeros((n_t,p['n_area']))
#---------------------------------------------------------------------------------
# Initialization
#---------------------------------------------------------------------------------
# Set activity to background firing
s_n[0] = s_n_tgt
r_inh[0] = r_inh_tgt
r_exc[0] = r_exc_tgt
#---------------------------------------------------------------------------------
# Running the network
#---------------------------------------------------------------------------------
for i_t in xrange(1, n_t):
d_s_n = -s_n[i_t-1] + p['gamma'] * (p['tau_exc']/
1000) * (1-s_n[i_t-1]) * r_exc[i_t-1]
s_n[i_t] = s_n[i_t-1] + d_s_n * dt/p['tau_exc']
longrange_E = np.dot(fln_scaled,s_n[i_t])
I_exc = local_EE*s_n[i_t] + local_EI*r_inh[i_t-1] + \
p['muEE'] * longrange_E + I_bkg_exc + I_stim_exc[i_t]
I_inh = local_IE*s_n[i_t] + local_II*r_inh[i_t-1] + I_bkg_inh
d_r_inh = -r_inh[i_t-1] + self.fI(I_inh)
r_exc[i_t] = F(I_exc)
r_inh[i_t] = r_inh[i_t-1] + d_r_inh * dt/p['tau_inh']
#---------------------------------------------------------------------------------
# Plotting the results
#---------------------------------------------------------------------------------
_ = plt.figure(figsize=(4,4))
area_name_list = ['V1','V4','8m','8l','TEO','7A','9/46d','TEpd','24c']
area_idx_list = [-1]+[p['areas'].index(name) for name in area_name_list]
f, ax_list = plt.subplots(len(area_idx_list), sharex=True)
for ax, area_idx in zip(ax_list, area_idx_list):
if area_idx < 0:
y_plot = I_stim_exc[:, area_stim_idx]
txt = 'Input'
else:
y_plot = r_exc[:,area_idx]
txt = p['areas'][area_idx]
y_plot = y_plot - y_plot.min()
ax.plot(t_plot, y_plot)
ax.text(0.9, 0.6, txt, transform=ax.transAxes)
ax.set_yticks([y_plot.max()])
ax.set_yticklabels(['{:0.4f}'.format(y_plot.max())])
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
f.text(0.01, 0.5, 'Change in firing rate (Hz)', va='center', rotation='vertical')
ax.set_xlabel('Time (ms)')
if plotfile is not None:
#plt.savefig(plotfile)
print 'Figure saved at ' + plotfile
if __name__ == '__main__':
model = Model()
model.run_stimulus(plotfile=None) # Run stimulation to V1
|
xjwanglab/book
|
chaudhuri2015/chaudhuri2015_nonlinear.py
|
Python
|
mit
| 8,160
|
[
"NEURON"
] |
83ecde166a62e794bb3f5c32b65ceea92000a6a098426e716937a46d540ffc19
|
try: paraview.simple
except: from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
Custom_0028_vtk = GetActiveSource()
Threshold1 = Threshold()
Threshold1.Scalars = ['POINTS', 'pressure']
Threshold1.ThresholdRange = [-0.81475600000000004, 0.91933500000000001]
Threshold1.Scalars = ['POINTS', 'geom']
Threshold1.ThresholdRange = [0.17000000000000001, 1.0]
AnimationScene1 = GetAnimationScene()
RenderView1 = CreateRenderView()
RenderView1.HeadLightWarmth = 1.0
RenderView1.KeyLightIntensity = 1.0
RenderView1.UseLight = 1
RenderView1.FillLightWarmth = 1.0
RenderView1.CameraPosition = [7.4850001335144043, 0.73500001430511475, 29.058915388621521]
RenderView1.LightSwitch = 1
RenderView1.CameraClippingRange = [28.768326234735305, 29.494799119450846]
RenderView1.ViewTime = 0.0
RenderView1.LODThreshold = 5.0
RenderView1.BackLightWarmth = 1.0
RenderView1.CenterOfRotation = [7.4850001335144043, 0.73500001430511475, 0.0]
RenderView1.MaintainLuminance = 1
RenderView1.CameraFocalPoint = [7.4850001335144043, 0.73500001430511475, 0.0]
RenderView1.RenderInterruptsEnabled = 0
RenderView1.CameraParallelScale = 7.521000732597968
RenderView1.KeyLightWarmth = 1.0
a1_pressure_PVLookupTable = GetLookupTableForArray( "pressure", 1 )
DataRepresentation2 = Show()
DataRepresentation2.EdgeColor = [0.0, 0.0, 0.50000762951094835]
DataRepresentation2.ColorAttributeType = 'POINT_DATA'
DataRepresentation2.ScalarOpacityFunction = []
DataRepresentation2.ColorArrayName = 'pressure'
DataRepresentation2.ScalarOpacityUnitDistance = 0.57183913878223691
DataRepresentation2.LookupTable = a1_pressure_PVLookupTable
AnimationScene1.ViewModules = [ a2DRenderView1, RenderView1 ]
Delete(a2DRenderView1)
DataRepresentation1 = GetDisplayProperties(Custom_0028_vtk)
Delete(DataRepresentation1)
AnimationScene1.ViewModules = RenderView1
SetActiveSource(Custom_0028_vtk)
Threshold2 = Threshold()
Threshold2.Scalars = ['POINTS', 'pressure']
Threshold2.ThresholdRange = [-0.81475600000000004, 0.91933500000000001]
Threshold2.Scalars = ['POINTS', 'geom']
Threshold2.ThresholdRange = [0.0, 0.84999999999999998]
DataRepresentation3 = Show()
DataRepresentation3.EdgeColor = [0.0, 0.0, 0.50000762951094835]
DataRepresentation3.ColorAttributeType = 'POINT_DATA'
DataRepresentation3.ScalarOpacityFunction = []
DataRepresentation3.ColorArrayName = 'pressure'
DataRepresentation3.ScalarOpacityUnitDistance = 0.4135471980049702
DataRepresentation3.LookupTable = a1_pressure_PVLookupTable
DataRepresentation3.ColorArrayName = ''
DataRepresentation3.DiffuseColor = [0.66666666666666663, 0.0, 0.0]
SetActiveSource(Threshold1)
StreamTracer1 = StreamTracer( SeedType="Point Source" )
ScalarBarWidgetRepresentation1 = CreateScalarBar( Orientation='Horizontal', Title='pressure', Position2=[0.49999999999999972, 0.12999999999999989], Enabled=1, LabelFontSize=8, LookupTable=a1_pressure_PVLookupTable, TitleFontSize=10, Position=[0.25264106050305918, 0.15444121071012812] )
GetRenderView().Representations.append(ScalarBarWidgetRepresentation1)
a1_pressure_PVLookupTable.RGBPoints = [-0.092078699999999999, 0.0, 0.0, 1.0, 0.91933500000000001, 1.0, 0.0, 0.0]
StreamTracer1.SeedType.Center = [7.4850001335144043, 0.73500001430511475, 0.0]
StreamTracer1.SeedType.Radius = 1.4970000267028809
StreamTracer1.Vectors = ['POINTS', 'velocity']
StreamTracer1.SeedType = "Point Source"
StreamTracer1.MaximumStreamlineLength = 14.970000267028809
StreamTracer1.SeedType.NumberOfPoints = 100
StreamTracer1.MaximumStreamlineLength = 14.9700002670288
StreamTracer1.SeedType = "High Resolution Line Source"
DataRepresentation4 = Show()
DataRepresentation4.EdgeColor = [0.0, 0.0, 0.50000762951094835]
DataRepresentation4.ColorAttributeType = 'POINT_DATA'
DataRepresentation4.ColorArrayName = 'pressure'
DataRepresentation4.Texture = []
DataRepresentation4.LookupTable = a1_pressure_PVLookupTable
DataRepresentation2.Visibility = 0
StreamTracer1.SeedType.Point2 = [7.5, 0.0, 0.0]
StreamTracer1.SeedType.Resolution = 50
StreamTracer1.SeedType.Point1 = [15.0, 1.5, 0.0]
DataRepresentation2.Visibility = 1
SetActiveSource(Threshold1)
StreamTracer2 = StreamTracer( SeedType="Point Source" )
DataRepresentation4.ColorArrayName = ''
DataRepresentation4.DiffuseColor = [0.0, 0.0, 0.0]
StreamTracer2.SeedType.Center = [7.4850001335144043, 0.73500001430511475, 0.0]
StreamTracer2.SeedType.Radius = 1.4970000267028809
StreamTracer2.Vectors = ['POINTS', 'velocity']
StreamTracer2.SeedType = "Point Source"
StreamTracer2.MaximumStreamlineLength = 14.970000267028809
StreamTracer2.SeedType.NumberOfPoints = 100
StreamTracer2.MaximumStreamlineLength = 14.9700002670288
StreamTracer2.SeedType = "High Resolution Line Source"
DataRepresentation5 = Show()
DataRepresentation5.EdgeColor = [0.0, 0.0, 0.50000762951094835]
DataRepresentation5.ColorAttributeType = 'POINT_DATA'
DataRepresentation5.ColorArrayName = 'pressure'
DataRepresentation5.Texture = []
DataRepresentation5.LookupTable = a1_pressure_PVLookupTable
DataRepresentation2.Visibility = 0
StreamTracer2.SeedType.Point2 = [15.0, 0.75, 0.0]
StreamTracer2.SeedType.Resolution = 25
StreamTracer2.SeedType.Point1 = [0.0, 1.5, 0.0]
StreamTracer1.SeedType.Resolution = 25
DataRepresentation2.Visibility = 1
StreamTracer2.SeedType.Point2 = [0.0, 0.75, 0.0]
StreamTracer2.SeedType.Point1 = [15.0, 1.5, 0.0]
DataRepresentation5.ColorArrayName = ''
DataRepresentation5.DiffuseColor = [0.0, 0.0, 0.0]
Render()
|
kel85uk/NuSiF_CFD
|
skeleton/Parameter_files/post_process.py
|
Python
|
gpl-2.0
| 5,435
|
[
"ParaView"
] |
cfc87af5aaefe68d209a206315826464a9ee583d3adafe70d2e4dc7610204b7b
|
import logging
from celery import task
from mkt.feed.models import FeedApp, FeedCollection
log = logging.getLogger('z.feed')
@task
def _migrate_collection_colors(ids, model):
"""Migrate deprecated background color (hex) to color (name)."""
cls = FeedApp
if model == 'collection':
cls = FeedCollection
for obj in cls.objects.filter(id__in=ids):
if obj.background_color and not obj.color:
try:
color = {
'#CE001C': 'ruby',
'#F78813': 'amber',
'#00953F': 'emerald',
'#0099D0': 'aquamarine',
'#1E1E9C': 'sapphire',
'#5A197E': 'amethyst',
'#A20D55': 'garnet'
}.get(obj.background_color, 'aquamarine')
except KeyError:
continue
obj.update(color=color)
log.info('Migrated %s:%s from %s to %s' %
(model, unicode(obj.id), obj.background_color, color))
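# Illustrative invocation sketch (added; not part of the original module).
# The ids below are placeholders; in practice the task would be queued
# through Celery, e.g. from a data migration:
#
#     _migrate_collection_colors.delay([1, 2, 3], 'collection')
#     _migrate_collection_colors.delay([4, 5], 'app')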
|
ingenioustechie/zamboni
|
mkt/feed/tasks.py
|
Python
|
bsd-3-clause
| 1,037
|
[
"Amber"
] |
08a09a4fb00d574f6a11b84810b13ad130f0473003fd0b3acbd92e657a172e30
|
# -*- coding: utf-8 -*-
"""
functions.py - Miscellaneous functions with no other home
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
from __future__ import division
import warnings
import numpy as np
import decimal, re
import ctypes
import sys, struct
from .python2_3 import asUnicode, basestring
from .Qt import QtGui, QtCore, USE_PYSIDE
from . import getConfigOption, setConfigOptions
from . import debug
Colors = {
'b': QtGui.QColor(0,0,255,255),
'g': QtGui.QColor(0,255,0,255),
'r': QtGui.QColor(255,0,0,255),
'c': QtGui.QColor(0,255,255,255),
'm': QtGui.QColor(255,0,255,255),
'y': QtGui.QColor(255,255,0,255),
'k': QtGui.QColor(0,0,0,255),
'w': QtGui.QColor(255,255,255,255),
'd': QtGui.QColor(150,150,150,255),
'l': QtGui.QColor(200,200,200,255),
's': QtGui.QColor(100,100,150,255),
}
SI_PREFIXES = asUnicode('yzafpnµm kMGTPEZY')
SI_PREFIXES_ASCII = 'yzafpnum kMGTPEZY'
def siScale(x, minVal=1e-25, allowUnicode=True):
"""
Return the recommended scale factor and SI prefix string for x.
Example::
siScale(0.0001) # returns (1e6, 'μ')
# This indicates that the number 0.0001 is best represented as 0.0001 * 1e6 = 100 μUnits
"""
if isinstance(x, decimal.Decimal):
x = float(x)
try:
if np.isnan(x) or np.isinf(x):
return(1, '')
except:
print(x, type(x))
raise
if abs(x) < minVal:
m = 0
x = 0
else:
m = int(np.clip(np.floor(np.log(abs(x))/np.log(1000)), -9.0, 9.0))
if m == 0:
pref = ''
elif m < -8 or m > 8:
pref = 'e%d' % (m*3)
else:
if allowUnicode:
pref = SI_PREFIXES[m+8]
else:
pref = SI_PREFIXES_ASCII[m+8]
p = .001**m
return (p, pref)
def siFormat(x, precision=3, suffix='', space=True, error=None, minVal=1e-25, allowUnicode=True):
"""
Return the number x formatted in engineering notation with SI prefix.
Example::
siFormat(0.0001, suffix='V') # returns "100 μV"
"""
if space is True:
space = ' '
if space is False:
space = ''
(p, pref) = siScale(x, minVal, allowUnicode)
if not (len(pref) > 0 and pref[0] == 'e'):
pref = space + pref
if error is None:
fmt = "%." + str(precision) + "g%s%s"
return fmt % (x*p, pref, suffix)
else:
if allowUnicode:
plusminus = space + asUnicode("±") + space
else:
plusminus = " +/- "
fmt = "%." + str(precision) + "g%s%s%s%s"
return fmt % (x*p, pref, suffix, plusminus, siFormat(error, precision=precision, suffix=suffix, space=space, minVal=minVal))
def siEval(s):
"""
Convert a value written in SI notation to its equivalent prefixless value
Example::
siEval("100 μV") # returns 0.0001
"""
s = asUnicode(s)
m = re.match(r'(-?((\d+(\.\d*)?)|(\.\d+))([eE]-?\d+)?)\s*([u' + SI_PREFIXES + r']?).*$', s)
if m is None:
raise Exception("Can't convert string '%s' to number." % s)
v = float(m.groups()[0])
p = m.groups()[6]
#if p not in SI_PREFIXES:
#raise Exception("Can't convert string '%s' to number--unknown prefix." % s)
if p == '':
n = 0
elif p == 'u':
n = -2
else:
n = SI_PREFIXES.index(p) - 8
return v * 1000**n
class Color(QtGui.QColor):
def __init__(self, *args):
QtGui.QColor.__init__(self, mkColor(*args))
def glColor(self):
"""Return (r,g,b,a) normalized for use in opengl"""
return (self.red()/255., self.green()/255., self.blue()/255., self.alpha()/255.)
def __getitem__(self, ind):
return (self.red, self.green, self.blue, self.alpha)[ind]()
def mkColor(*args):
"""
Convenience function for constructing QColor from a variety of argument types. Accepted arguments are:
================ ================================================
'c' one of: r, g, b, c, m, y, k, w
R, G, B, [A] integers 0-255
(R, G, B, [A]) tuple of integers 0-255
float greyscale, 0.0-1.0
int see :func:`intColor() <pyqtgraph.intColor>`
(int, hues) see :func:`intColor() <pyqtgraph.intColor>`
"RGB" hexadecimal strings; may begin with '#'
"RGBA"
"RRGGBB"
"RRGGBBAA"
QColor QColor instance; makes a copy.
================ ================================================
"""
err = 'Not sure how to make a color from "%s"' % str(args)
if len(args) == 1:
if isinstance(args[0], basestring):
c = args[0]
if c[0] == '#':
c = c[1:]
if len(c) == 1:
try:
return Colors[c]
except KeyError:
raise Exception('No color named "%s"' % c)
if len(c) == 3:
r = int(c[0]*2, 16)
g = int(c[1]*2, 16)
b = int(c[2]*2, 16)
a = 255
elif len(c) == 4:
r = int(c[0]*2, 16)
g = int(c[1]*2, 16)
b = int(c[2]*2, 16)
a = int(c[3]*2, 16)
elif len(c) == 6:
r = int(c[0:2], 16)
g = int(c[2:4], 16)
b = int(c[4:6], 16)
a = 255
elif len(c) == 8:
r = int(c[0:2], 16)
g = int(c[2:4], 16)
b = int(c[4:6], 16)
a = int(c[6:8], 16)
elif isinstance(args[0], QtGui.QColor):
return QtGui.QColor(args[0])
elif isinstance(args[0], float):
r = g = b = int(args[0] * 255)
a = 255
elif hasattr(args[0], '__len__'):
if len(args[0]) == 3:
(r, g, b) = args[0]
a = 255
elif len(args[0]) == 4:
(r, g, b, a) = args[0]
elif len(args[0]) == 2:
return intColor(*args[0])
else:
raise Exception(err)
elif type(args[0]) == int:
return intColor(args[0])
else:
raise Exception(err)
elif len(args) == 3:
(r, g, b) = args
a = 255
elif len(args) == 4:
(r, g, b, a) = args
else:
raise Exception(err)
args = [r,g,b,a]
args = [0 if np.isnan(a) or np.isinf(a) else a for a in args]
args = list(map(int, args))
return QtGui.QColor(*args)
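def _mkColor_examples():
    """Illustrative sketch (added; not part of pyqtgraph): a few of the
    argument forms accepted by mkColor, purely for demonstration."""
    return [
        mkColor('r'),                  # single-letter color name
        mkColor('#FF8800'),            # "RRGGBB" hex string
        mkColor((255, 136, 0, 128)),   # (R, G, B, A) tuple of ints 0-255
        mkColor(0.5),                  # float greyscale
    ]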
def mkBrush(*args, **kwds):
"""
| Convenience function for constructing Brush.
| This function always constructs a solid brush and accepts the same arguments as :func:`mkColor() <pyqtgraph.mkColor>`
| Calling mkBrush(None) returns an invisible brush.
"""
if 'color' in kwds:
color = kwds['color']
elif len(args) == 1:
arg = args[0]
if arg is None:
return QtGui.QBrush(QtCore.Qt.NoBrush)
elif isinstance(arg, QtGui.QBrush):
return QtGui.QBrush(arg)
else:
color = arg
elif len(args) > 1:
color = args
return QtGui.QBrush(mkColor(color))
def mkPen(*args, **kargs):
"""
Convenience function for constructing QPen.
Examples::
mkPen(color)
mkPen(color, width=2)
mkPen(cosmetic=False, width=4.5, color='r')
mkPen({'color': "FF0", width: 2})
mkPen(None) # (no pen)
In these examples, *color* may be replaced with any arguments accepted by :func:`mkColor() <pyqtgraph.mkColor>` """
color = kargs.get('color', None)
width = kargs.get('width', 1)
style = kargs.get('style', None)
dash = kargs.get('dash', None)
cosmetic = kargs.get('cosmetic', True)
hsv = kargs.get('hsv', None)
if len(args) == 1:
arg = args[0]
if isinstance(arg, dict):
return mkPen(**arg)
if isinstance(arg, QtGui.QPen):
return QtGui.QPen(arg) ## return a copy of this pen
elif arg is None:
style = QtCore.Qt.NoPen
else:
color = arg
if len(args) > 1:
color = args
if color is None:
color = mkColor('l')
if hsv is not None:
color = hsvColor(*hsv)
else:
color = mkColor(color)
pen = QtGui.QPen(QtGui.QBrush(color), width)
pen.setCosmetic(cosmetic)
if style is not None:
pen.setStyle(style)
if dash is not None:
pen.setDashPattern(dash)
return pen
def hsvColor(hue, sat=1.0, val=1.0, alpha=1.0):
"""Generate a QColor from HSVa values. (all arguments are float 0.0-1.0)"""
c = QtGui.QColor()
c.setHsvF(hue, sat, val, alpha)
return c
def colorTuple(c):
"""Return a tuple (R,G,B,A) from a QColor"""
return (c.red(), c.green(), c.blue(), c.alpha())
def colorStr(c):
"""Generate a hex string code from a QColor"""
return ('%02x'*4) % colorTuple(c)
def intColor(index, hues=9, values=1, maxValue=255, minValue=150, maxHue=360, minHue=0, sat=255, alpha=255, **kargs):
"""
Creates a QColor from a single index. Useful for stepping through a predefined list of colors.
The argument *index* determines which color from the set will be returned. All other arguments determine what the set of predefined colors will be.
Colors are chosen by cycling across hues while varying the value (brightness).
By default, this selects from a list of 9 hues."""
hues = int(hues)
values = int(values)
ind = int(index) % (hues * values)
indh = ind % hues
indv = ind / hues
if values > 1:
v = minValue + indv * ((maxValue-minValue) / (values-1))
else:
v = maxValue
h = minHue + (indh * (maxHue-minHue)) / hues
c = QtGui.QColor()
c.setHsv(h, sat, v)
c.setAlpha(alpha)
return c
def glColor(*args, **kargs):
"""
Convert a color to OpenGL color format (r,g,b,a) floats 0.0-1.0
Accepts same arguments as :func:`mkColor <pyqtgraph.mkColor>`.
"""
c = mkColor(*args, **kargs)
return (c.red()/255., c.green()/255., c.blue()/255., c.alpha()/255.)
def makeArrowPath(headLen=20, tipAngle=20, tailLen=20, tailWidth=3, baseAngle=0):
"""
Construct a path outlining an arrow with the given dimensions.
The arrow points in the -x direction with tip positioned at 0,0.
*tipAngle* (in degrees) determines the width of the arrow head.
If *tailLen* is None, no tail will be drawn.
"""
headWidth = headLen * np.tan(tipAngle * 0.5 * np.pi/180.)
path = QtGui.QPainterPath()
path.moveTo(0,0)
path.lineTo(headLen, -headWidth)
if tailLen is None:
innerY = headLen - headWidth * np.tan(baseAngle*np.pi/180.)
path.lineTo(innerY, 0)
else:
tailWidth *= 0.5
innerY = headLen - (headWidth-tailWidth) * np.tan(baseAngle*np.pi/180.)
path.lineTo(innerY, -tailWidth)
path.lineTo(headLen + tailLen, -tailWidth)
path.lineTo(headLen + tailLen, tailWidth)
path.lineTo(innerY, tailWidth)
path.lineTo(headLen, headWidth)
path.lineTo(0,0)
return path
def eq(a, b):
"""The great missing equivalence function: Guaranteed evaluation to a single bool value."""
if a is b:
return True
try:
with warnings.catch_warnings(module=np): # ignore numpy futurewarning (numpy v. 1.10)
e = a==b
except ValueError:
return False
except AttributeError:
return False
except:
print('failed to evaluate equivalence for:')
print(" a:", str(type(a)), str(a))
print(" b:", str(type(b)), str(b))
raise
t = type(e)
if t is bool:
return e
elif t is np.bool_:
return bool(e)
elif isinstance(e, np.ndarray) or (hasattr(e, 'implements') and e.implements('MetaArray')):
try: ## disaster: if a is an empty array and b is not, then e.all() is True
if a.shape != b.shape:
return False
except:
return False
if (hasattr(e, 'implements') and e.implements('MetaArray')):
return e.asarray().all()
else:
return e.all()
else:
raise Exception("== operator returned type %s" % str(type(e)))
def affineSlice(data, shape, origin, vectors, axes, order=1, returnCoords=False, **kargs):
"""
Take a slice of any orientation through an array. This is useful for extracting sections of multi-dimensional arrays such as MRI images for viewing as 1D or 2D data.
The slicing axes are arbitrary; they do not need to be orthogonal to the original data or even to each other. It is possible to use this function to extract arbitrary linear, rectangular, or parallelepiped shapes from within larger datasets. The original data is interpolated onto a new array of coordinates using scipy.ndimage.map_coordinates if it is available (see the scipy documentation for more information about this). If scipy is not available, then a slower implementation of map_coordinates is used.
For a graphical interface to this function, see :func:`ROI.getArrayRegion <pyqtgraph.ROI.getArrayRegion>`
============== ====================================================================================================
**Arguments:**
*data* (ndarray) the original dataset
*shape* the shape of the slice to take (Note the return value may have more dimensions than len(shape))
*origin* the location in the original dataset that will become the origin of the sliced data.
*vectors* list of unit vectors which point in the direction of the slice axes. Each vector must have the same
length as *axes*. If the vectors are not unit length, the result will be scaled relative to the
original data. If the vectors are not orthogonal, the result will be sheared relative to the
original data.
*axes* The axes in the original dataset which correspond to the slice *vectors*
*order* The order of spline interpolation. Default is 1 (linear). See scipy.ndimage.map_coordinates
for more information.
*returnCoords* If True, return a tuple (result, coords) where coords is the array of coordinates used to select
values from the original dataset.
*All extra keyword arguments are passed to scipy.ndimage.map_coordinates.*
--------------------------------------------------------------------------------------------------------------------
============== ====================================================================================================
Note the following must be true:
| len(shape) == len(vectors)
| len(origin) == len(axes) == len(vectors[i])
Example: start with a 4D fMRI data set, take a diagonal-planar slice out of the last 3 axes
* data = array with dims (time, x, y, z) = (100, 40, 40, 40)
* The plane to pull out is perpendicular to the vector (x,y,z) = (1,1,1)
* The origin of the slice will be at (x,y,z) = (40, 0, 0)
* We will slice a 20x20 plane from each timepoint, giving a final shape (100, 20, 20)
The call for this example would look like::
affineSlice(data, shape=(20,20), origin=(40,0,0), vectors=((-1, 1, 0), (-1, 0, 1)), axes=(1,2,3))
"""
try:
import scipy.ndimage
have_scipy = True
except ImportError:
have_scipy = False
have_scipy = False  # unconditionally use the internal interpolateArray() path below
# sanity check
if len(shape) != len(vectors):
raise Exception("shape and vectors must have same length.")
if len(origin) != len(axes):
raise Exception("origin and axes must have same length.")
for v in vectors:
if len(v) != len(axes):
raise Exception("each vector must be same length as axes.")
shape = list(map(np.ceil, shape))
## transpose data so slice axes come first
trAx = list(range(data.ndim))
for x in axes:
trAx.remove(x)
tr1 = tuple(axes) + tuple(trAx)
data = data.transpose(tr1)
#print "tr1:", tr1
## dims are now [(slice axes), (other axes)]
## make sure vectors are arrays
if not isinstance(vectors, np.ndarray):
vectors = np.array(vectors)
if not isinstance(origin, np.ndarray):
origin = np.array(origin)
origin.shape = (len(axes),) + (1,)*len(shape)
## Build array of sample locations.
grid = np.mgrid[tuple([slice(0,x) for x in shape])] ## mesh grid of indexes
x = (grid[np.newaxis,...] * vectors.transpose()[(Ellipsis,) + (np.newaxis,)*len(shape)]).sum(axis=1) ## magic
x += origin
## iterate manually over unused axes since map_coordinates won't do it for us
if have_scipy:
extraShape = data.shape[len(axes):]
output = np.empty(tuple(shape) + extraShape, dtype=data.dtype)
for inds in np.ndindex(*extraShape):
ind = (Ellipsis,) + inds
output[ind] = scipy.ndimage.map_coordinates(data[ind], x, order=order, **kargs)
else:
# map_coordinates expects the indexes as the first axis, whereas
# interpolateArray expects indexes at the last axis.
tr = tuple(range(1,x.ndim)) + (0,)
output = interpolateArray(data, x.transpose(tr))
tr = list(range(output.ndim))
trb = []
for i in range(min(axes)):
ind = tr1.index(i) + (len(shape)-len(axes))
tr.remove(ind)
trb.append(ind)
tr2 = tuple(trb+tr)
## Untranspose array before returning
output = output.transpose(tr2)
if returnCoords:
return (output, x)
else:
return output
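# Illustrative sketch (added; not part of pyqtgraph): an axis-aligned slice
# through a small synthetic volume. The array size and vectors are arbitrary
# assumptions; with unit vectors and origin (0, 0, 0) the result is close to
# data[:3, :3, 0] (up to interpolation).
def _affineSlice_example():
    data = np.arange(4 * 5 * 6, dtype=float).reshape(4, 5, 6)
    return affineSlice(data, shape=(3, 3), origin=(0, 0, 0),
                       vectors=((1, 0, 0), (0, 1, 0)), axes=(0, 1, 2))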
def interpolateArray(data, x, default=0.0):
"""
N-dimensional interpolation similar to scipy.ndimage.map_coordinates.
This function returns linearly-interpolated values sampled from a regular
grid of data.
*data* is an array of any shape containing the values to be interpolated.
*x* is an array with (shape[-1] <= data.ndim) containing the locations
within *data* to interpolate.
Returns array of shape (x.shape[:-1] + data.shape[x.shape[-1]:])
For example, assume we have the following 2D image data::
>>> data = np.array([[1, 2, 4 ],
[10, 20, 40 ],
[100, 200, 400]])
To compute a single interpolated point from this data::
>>> x = np.array([(0.5, 0.5)])
>>> interpolateArray(data, x)
array([ 8.25])
To compute a 1D list of interpolated locations::
>>> x = np.array([(0.5, 0.5),
(1.0, 1.0),
(1.0, 2.0),
(1.5, 0.0)])
>>> interpolateArray(data, x)
array([ 8.25, 20. , 40. , 55. ])
To compute a 2D array of interpolated locations::
>>> x = np.array([[(0.5, 0.5), (1.0, 2.0)],
[(1.0, 1.0), (1.5, 0.0)]])
>>> interpolateArray(data, x)
array([[ 8.25, 40. ],
[ 20. , 55. ]])
..and so on. The *x* argument may have any shape as long as
```x.shape[-1] <= data.ndim```. In the case that
```x.shape[-1] < data.ndim```, then the remaining axes are simply
broadcasted as usual. For example, we can interpolate one location
from an entire row of the data::
>>> x = np.array([[0.5]])
>>> interpolateArray(data, x)
array([[ 5.5, 11. , 22. ]])
This is useful for interpolating from arrays of colors, vertexes, etc.
"""
prof = debug.Profiler()
nd = data.ndim
md = x.shape[-1]
if md > nd:
raise TypeError("x.shape[-1] must be less than or equal to data.ndim")
# First we generate arrays of indexes that are needed to
# extract the data surrounding each point
fields = np.mgrid[(slice(0,2),) * md]
xmin = np.floor(x).astype(int)
xmax = xmin + 1
indexes = np.concatenate([xmin[np.newaxis, ...], xmax[np.newaxis, ...]])
fieldInds = []
totalMask = np.ones(x.shape[:-1], dtype=bool) # keep track of out-of-bound indexes
for ax in range(md):
mask = (xmin[...,ax] >= 0) & (x[...,ax] <= data.shape[ax]-1)
# keep track of points that need to be set to default
totalMask &= mask
# ..and keep track of indexes that are out of bounds
# (note that when x[...,ax] == data.shape[ax], then xmax[...,ax] will be out
# of bounds, but the interpolation will work anyway)
mask &= (xmax[...,ax] < data.shape[ax])
axisIndex = indexes[...,ax][fields[ax]]
axisIndex[axisIndex < 0] = 0
axisIndex[axisIndex >= data.shape[ax]] = 0
fieldInds.append(axisIndex)
prof()
# Get data values surrounding each requested point
fieldData = data[tuple(fieldInds)]
prof()
## Interpolate
s = np.empty((md,) + fieldData.shape, dtype=float)
dx = x - xmin
# reshape fields for arithmetic against dx
for ax in range(md):
f1 = fields[ax].reshape(fields[ax].shape + (1,)*(dx.ndim-1))
sax = f1 * dx[...,ax] + (1-f1) * (1-dx[...,ax])
sax = sax.reshape(sax.shape + (1,) * (s.ndim-1-sax.ndim))
s[ax] = sax
s = np.product(s, axis=0)
result = fieldData * s
for i in range(md):
result = result.sum(axis=0)
prof()
if totalMask.ndim > 0:
result[~totalMask] = default
else:
if totalMask is False:
result[:] = default
prof()
return result
def subArray(data, offset, shape, stride):
"""
Unpack a sub-array from *data* using the specified offset, shape, and stride.
Note that *stride* is specified in array elements, not bytes.
For example, we have a 2x3 array packed in a 1D array as follows::
data = [_, _, 00, 01, 02, _, 10, 11, 12, _]
Then we can unpack the sub-array with this call::
subArray(data, offset=2, shape=(2, 3), stride=(4, 1))
..which returns::
[[00, 01, 02],
[10, 11, 12]]
This function operates only on the first axis of *data*. So changing
the input in the example above to have shape (10, 7) would cause the
output to have shape (2, 3, 7).
"""
#data = data.flatten()
data = data[offset:]
shape = tuple(shape)
stride = tuple(stride)
extraShape = data.shape[1:]
#print data.shape, offset, shape, stride
for i in range(len(shape)):
mask = (slice(None),) * i + (slice(None, shape[i] * stride[i]),)
newShape = shape[:i+1]
if i < len(shape)-1:
newShape += (stride[i],)
newShape += extraShape
#print i, mask, newShape
#print "start:\n", data.shape, data
data = data[mask]
#print "mask:\n", data.shape, data
data = data.reshape(newShape)
#print "reshape:\n", data.shape, data
return data
def transformToArray(tr):
"""
Given a QTransform, return a 3x3 numpy array.
Given a QMatrix4x4, return a 4x4 numpy array.
Example: map an array of x,y coordinates through a transform::
## coordinates to map are (1,5), (2,6), (3,7), and (4,8)
coords = np.array([[1,2,3,4], [5,6,7,8], [1,1,1,1]]) # the extra '1' coordinate is needed for translation to work
## Make an example transform
tr = QtGui.QTransform()
tr.translate(3,4)
tr.scale(2, 0.1)
## convert to array
m = pg.transformToArray(tr)[:2] # ignore the perspective portion of the transformation
## map coordinates through transform
mapped = np.dot(m, coords)
"""
#return np.array([[tr.m11(), tr.m12(), tr.m13()],[tr.m21(), tr.m22(), tr.m23()],[tr.m31(), tr.m32(), tr.m33()]])
## The order of elements given by the method names m11..m33 is misleading--
## It is most common for x,y translation to occupy the positions 1,3 and 2,3 in
## a transformation matrix. However, with QTransform these values appear at m31 and m32.
## So the correct interpretation is transposed:
if isinstance(tr, QtGui.QTransform):
return np.array([[tr.m11(), tr.m21(), tr.m31()], [tr.m12(), tr.m22(), tr.m32()], [tr.m13(), tr.m23(), tr.m33()]])
elif isinstance(tr, QtGui.QMatrix4x4):
return np.array(tr.copyDataTo()).reshape(4,4)
else:
raise Exception("Transform argument must be either QTransform or QMatrix4x4.")
def transformCoordinates(tr, coords, transpose=False):
"""
Map a set of 2D or 3D coordinates through a QTransform or QMatrix4x4.
The shape of coords must be (2,...) or (3,...)
The mapping will _ignore_ any perspective transformations.
For coordinate arrays with ndim=2, this is basically equivalent to matrix multiplication.
Most arrays, however, prefer to put the coordinate axis at the end (eg. shape=(...,3)). To
allow this, use transpose=True.
"""
if transpose:
## move last axis to beginning. This transposition will be reversed before returning the mapped coordinates.
coords = coords.transpose((coords.ndim-1,) + tuple(range(0,coords.ndim-1)))
nd = coords.shape[0]
if isinstance(tr, np.ndarray):
m = tr
else:
m = transformToArray(tr)
m = m[:m.shape[0]-1] # remove perspective
## If coords are 3D and tr is 2D, assume no change for Z axis
if m.shape == (2,3) and nd == 3:
m2 = np.zeros((3,4))
m2[:2, :2] = m[:2,:2]
m2[:2, 3] = m[:2,2]
m2[2,2] = 1
m = m2
## if coords are 2D and tr is 3D, ignore Z axis
if m.shape == (3,4) and nd == 2:
m2 = np.empty((2,3))
m2[:,:2] = m[:2,:2]
m2[:,2] = m[:2,3]
m = m2
## reshape tr and coords to prepare for multiplication
m = m.reshape(m.shape + (1,)*(coords.ndim-1))
coords = coords[np.newaxis, ...]
# separate scale/rotate and translation
translate = m[:,-1]
m = m[:, :-1]
## map coordinates and return
mapped = (m*coords).sum(axis=1) ## apply scale/rotate
mapped += translate
if transpose:
## move first axis to end.
mapped = mapped.transpose(tuple(range(1,mapped.ndim)) + (0,))
return mapped
def solve3DTransform(points1, points2):
"""
Find a 3D transformation matrix that maps points1 onto points2.
Points must be specified as either lists of 4 Vectors or
(4, 3) arrays.
"""
import numpy.linalg
pts = []
for inp in (points1, points2):
if isinstance(inp, np.ndarray):
A = np.empty((4,4), dtype=float)
A[:,:3] = inp[:,:3]
A[:,3] = 1.0
else:
A = np.array([[inp[i].x(), inp[i].y(), inp[i].z(), 1] for i in range(4)])
pts.append(A)
## solve 3 sets of linear equations to determine transformation matrix elements
matrix = np.zeros((4,4))
for i in range(3):
## solve Ax = B; x is one row of the desired transformation matrix
matrix[i] = numpy.linalg.solve(pts[0], pts[1][:,i])
return matrix
def solveBilinearTransform(points1, points2):
"""
Find a bilinear transformation matrix (2x4) that maps points1 onto points2.
Points must be specified as a list of 4 Vector, Point, QPointF, etc.
To use this matrix to map a point [x,y]::
mapped = np.dot(matrix, [x*y, x, y, 1])
"""
import numpy.linalg
## A is 4 rows (points) x 4 columns (xy, x, y, 1)
## B is 4 rows (points) x 2 columns (x, y)
A = np.array([[points1[i].x()*points1[i].y(), points1[i].x(), points1[i].y(), 1] for i in range(4)])
B = np.array([[points2[i].x(), points2[i].y()] for i in range(4)])
## solve 2 sets of linear equations to determine transformation matrix elements
matrix = np.zeros((2,4))
for i in range(2):
matrix[i] = numpy.linalg.solve(A, B[:,i]) ## solve Ax = B; x is one row of the desired transformation matrix
return matrix
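# Illustrative sketch (added; not part of pyqtgraph): fit a bilinear map that
# scales the unit square by 2 and apply it to one corner. The points are
# arbitrary assumptions chosen so the result is easy to verify by hand.
def _solveBilinearTransform_example():
    pts1 = [QtCore.QPointF(0, 0), QtCore.QPointF(1, 0),
            QtCore.QPointF(0, 1), QtCore.QPointF(1, 1)]
    pts2 = [QtCore.QPointF(0, 0), QtCore.QPointF(2, 0),
            QtCore.QPointF(0, 2), QtCore.QPointF(2, 2)]
    matrix = solveBilinearTransform(pts1, pts2)
    x, y = 1.0, 1.0
    return np.dot(matrix, [x * y, x, y, 1])  # ~ array([2., 2.])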
def rescaleData(data, scale, offset, dtype=None, clip=None):
"""Return data rescaled and optionally cast to a new dtype::
data => (data-offset) * scale
"""
if dtype is None:
dtype = data.dtype
else:
dtype = np.dtype(dtype)
try:
if not getConfigOption('useWeave'):
raise Exception('Weave is disabled; falling back to slower version.')
try:
import scipy.weave
except ImportError:
raise Exception('scipy.weave is not importable; falling back to slower version.')
## require native dtype when using weave
if not data.dtype.isnative:
data = data.astype(data.dtype.newbyteorder('='))
if not dtype.isnative:
weaveDtype = dtype.newbyteorder('=')
else:
weaveDtype = dtype
newData = np.empty((data.size,), dtype=weaveDtype)
flat = np.ascontiguousarray(data).reshape(data.size)
size = data.size
code = """
double sc = (double)scale;
double off = (double)offset;
for( int i=0; i<size; i++ ) {
newData[i] = ((double)flat[i] - off) * sc;
}
"""
scipy.weave.inline(code, ['flat', 'newData', 'size', 'offset', 'scale'], compiler='gcc')
if dtype != weaveDtype:
newData = newData.astype(dtype)
data = newData.reshape(data.shape)
except:
if getConfigOption('useWeave'):
if getConfigOption('weaveDebug'):
debug.printExc("Error; disabling weave.")
setConfigOptions(useWeave=False)
#p = np.poly1d([scale, -offset*scale])
#d2 = p(data)
d2 = data - float(offset)
d2 *= scale
# Clip before converting dtype to avoid overflow
if dtype.kind in 'ui':
lim = np.iinfo(dtype)
if clip is None:
# don't let rescale cause integer overflow
d2 = np.clip(d2, lim.min, lim.max)
else:
d2 = np.clip(d2, max(clip[0], lim.min), min(clip[1], lim.max))
else:
if clip is not None:
d2 = np.clip(d2, *clip)
data = d2.astype(dtype)
return data
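# Illustrative sketch (added; not part of pyqtgraph): apply the documented
# (data - offset) * scale mapping to a small uint8 array. The values are
# arbitrary assumptions; note that results are clipped to the target dtype's
# range before casting, so 0 - 100 ends up at 0 rather than wrapping around.
def _rescaleData_example():
    raw = np.array([0, 128, 255], dtype=np.uint8)
    # -> array([  0,  28, 155], dtype=uint8)
    return rescaleData(raw, scale=1.0, offset=100, dtype=np.uint8)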
def applyLookupTable(data, lut):
"""
Uses values in *data* as indexes to select values from *lut*.
The returned data has shape data.shape + lut.shape[1:]
Note: color gradient lookup tables can be generated using GradientWidget.
"""
if data.dtype.kind not in ('i', 'u'):
data = data.astype(int)
return np.take(lut, data, axis=0, mode='clip')
def makeRGBA(*args, **kwds):
"""Equivalent to makeARGB(..., useRGBA=True)"""
kwds['useRGBA'] = True
return makeARGB(*args, **kwds)
def makeARGB(data, lut=None, levels=None, scale=None, useRGBA=False):
"""
Convert an array of values into an ARGB array suitable for building QImages,
OpenGL textures, etc.
Returns the ARGB array (unsigned byte) and a boolean indicating whether
there is alpha channel data. This is a two stage process:
1) Rescale the data based on the values in the *levels* argument (min, max).
2) Determine the final output by passing the rescaled values through a
lookup table.
Both stages are optional.
============== ==================================================================================
**Arguments:**
data numpy array of int/float types. If
levels List [min, max]; optionally rescale data before converting through the
lookup table. The data is rescaled such that min->0 and max->*scale*::
rescaled = (clip(data, min, max) - min) * (*scale* / (max - min))
It is also possible to use a 2D (N,2) array of values for levels. In this case,
it is assumed that each pair of min,max values in the levels array should be
applied to a different subset of the input data (for example, the input data may
already have RGB values and the levels are used to independently scale each
channel). The use of this feature requires that levels.shape[0] == data.shape[-1].
scale The maximum value to which data will be rescaled before being passed through the
lookup table (or returned if there is no lookup table). By default this will
be set to the length of the lookup table, or 255 if no lookup table is provided.
lut Optional lookup table (array with dtype=ubyte).
Values in data will be converted to color by indexing directly from lut.
The output data shape will be input.shape + lut.shape[1:].
Lookup tables can be built using ColorMap or GradientWidget.
useRGBA If True, the data is returned in RGBA order (useful for building OpenGL textures).
The default is False, which returns in ARGB order for use with QImage
(Note that 'ARGB' is a term used by the Qt documentation; the *actual* order
is BGRA).
============== ==================================================================================
"""
profile = debug.Profiler()
if data.ndim not in (2, 3):
raise TypeError("data must be 2D or 3D")
if data.ndim == 3 and data.shape[2] > 4:
raise TypeError("data.shape[2] must be <= 4")
if lut is not None and not isinstance(lut, np.ndarray):
lut = np.array(lut)
if levels is None:
# automatically decide levels based on data dtype
if data.dtype.kind == 'u':
levels = np.array([0, 2**(data.itemsize*8)-1])
elif data.dtype.kind == 'i':
s = 2**(data.itemsize*8 - 1)
levels = np.array([-s, s-1])
else:
raise Exception('levels argument is required for float input types')
if not isinstance(levels, np.ndarray):
levels = np.array(levels)
if levels.ndim == 1:
if levels.shape[0] != 2:
raise Exception('levels argument must have length 2')
elif levels.ndim == 2:
if lut is not None and lut.ndim > 1:
raise Exception('Cannot make ARGB data when both levels and lut have ndim > 2')
if levels.shape != (data.shape[-1], 2):
raise Exception('levels must have shape (data.shape[-1], 2)')
else:
raise Exception("levels argument must be 1D or 2D (got shape=%s)." % repr(levels.shape))
profile()
# Decide on maximum scaled value
if scale is None:
if lut is not None:
scale = lut.shape[0] - 1
else:
scale = 255.
# Decide on the dtype we want after scaling
if lut is None:
dtype = np.ubyte
else:
dtype = np.min_scalar_type(lut.shape[0]-1)
# Apply levels if given
if levels is not None:
if isinstance(levels, np.ndarray) and levels.ndim == 2:
# we are going to rescale each channel independently
if levels.shape[0] != data.shape[-1]:
raise Exception("When rescaling multi-channel data, there must be the same number of levels as channels (data.shape[-1] == levels.shape[0])")
newData = np.empty(data.shape, dtype=int)
for i in range(data.shape[-1]):
minVal, maxVal = levels[i]
if minVal == maxVal:
maxVal += 1e-16
newData[...,i] = rescaleData(data[...,i], scale/(maxVal-minVal), minVal, dtype=dtype)
data = newData
else:
# Apply level scaling unless it would have no effect on the data
minVal, maxVal = levels
if minVal != 0 or maxVal != scale:
if minVal == maxVal:
maxVal += 1e-16
data = rescaleData(data, scale/(maxVal-minVal), minVal, dtype=dtype)
profile()
# apply LUT if given
if lut is not None:
data = applyLookupTable(data, lut)
else:
if data.dtype is not np.ubyte:
data = np.clip(data, 0, 255).astype(np.ubyte)
profile()
# this will be the final image array
imgData = np.empty(data.shape[:2]+(4,), dtype=np.ubyte)
profile()
# decide channel order
if useRGBA:
order = [0,1,2,3] # array comes out RGBA
else:
order = [2,1,0,3] # for some reason, the colors line up as BGR in the final image.
# copy data into image array
if data.ndim == 2:
# This is tempting:
# imgData[..., :3] = data[..., np.newaxis]
# ..but it turns out this is faster:
for i in range(3):
imgData[..., i] = data
elif data.shape[2] == 1:
for i in range(3):
imgData[..., i] = data[..., 0]
else:
for i in range(0, data.shape[2]):
imgData[..., i] = data[..., order[i]]
profile()
# add opaque alpha channel if needed
if data.ndim == 2 or data.shape[2] == 3:
alpha = False
imgData[..., 3] = 255
else:
alpha = True
profile()
return imgData, alpha
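# Illustrative sketch (added; not part of pyqtgraph): convert a small float
# image to 8-bit ARGB using explicit levels. The data and levels are
# arbitrary assumptions; with no lookup table, values are rescaled so that
# levels[0] -> 0 and levels[1] -> 255, and an opaque alpha channel is added.
def _makeARGB_example():
    data = np.linspace(0., 1., 12).reshape(3, 4)
    imgData, alpha = makeARGB(data, levels=[0., 1.])
    # imgData.shape == (3, 4, 4), imgData.dtype == np.ubyte, alpha is False
    return imgData, alpha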
def makeQImage(imgData, alpha=None, copy=True, transpose=True):
"""
Turn an ARGB array into QImage.
By default, the data is copied; changes to the array will not
be reflected in the image. The image will be given a 'data' attribute
pointing to the array which shares its data to prevent python
freeing that memory while the image is in use.
============== ===================================================================
**Arguments:**
imgData Array of data to convert. Must have shape (width, height, 3 or 4)
and dtype=ubyte. The order of values in the 3rd axis must be
(b, g, r, a).
alpha If True, the QImage returned will have format ARGB32. If False,
the format will be RGB32. By default, _alpha_ is True if
array.shape[2] == 4.
copy If True, the data is copied before converting to QImage.
If False, the new QImage points directly to the data in the array.
Note that the array must be contiguous for this to work
(see numpy.ascontiguousarray).
transpose If True (the default), the array x/y axes are transposed before
creating the image. Note that Qt expects the axes to be in
(height, width) order whereas pyqtgraph usually prefers the
opposite.
============== ===================================================================
"""
## create QImage from buffer
profile = debug.Profiler()
## If we didn't explicitly specify alpha, check the array shape.
if alpha is None:
alpha = (imgData.shape[2] == 4)
copied = False
if imgData.shape[2] == 3: ## need to make alpha channel (even if alpha==False; QImage requires 32 bpp)
if copy is True:
d2 = np.empty(imgData.shape[:2] + (4,), dtype=imgData.dtype)
d2[:,:,:3] = imgData
d2[:,:,3] = 255
imgData = d2
copied = True
else:
raise Exception('Array has only 3 channels; cannot make QImage without copying.')
if alpha:
imgFormat = QtGui.QImage.Format_ARGB32
else:
imgFormat = QtGui.QImage.Format_RGB32
if transpose:
imgData = imgData.transpose((1, 0, 2)) ## QImage expects the row/column order to be opposite
profile()
if not imgData.flags['C_CONTIGUOUS']:
if copy is False:
extra = ' (try setting transpose=False)' if transpose else ''
raise Exception('Array is not contiguous; cannot make QImage without copying.'+extra)
imgData = np.ascontiguousarray(imgData)
copied = True
if copy is True and copied is False:
imgData = imgData.copy()
if USE_PYSIDE:
ch = ctypes.c_char.from_buffer(imgData, 0)
img = QtGui.QImage(ch, imgData.shape[1], imgData.shape[0], imgFormat)
else:
#addr = ctypes.addressof(ctypes.c_char.from_buffer(imgData, 0))
## PyQt API for QImage changed between 4.9.3 and 4.9.6 (I don't know exactly which version it was)
## So we first attempt the 4.9.6 API, then fall back to 4.9.3
#addr = ctypes.c_char.from_buffer(imgData, 0)
#try:
#img = QtGui.QImage(addr, imgData.shape[1], imgData.shape[0], imgFormat)
#except TypeError:
#addr = ctypes.addressof(addr)
#img = QtGui.QImage(addr, imgData.shape[1], imgData.shape[0], imgFormat)
try:
img = QtGui.QImage(imgData.ctypes.data, imgData.shape[1], imgData.shape[0], imgFormat)
except:
if copy:
# does not leak memory, is not mutable
img = QtGui.QImage(buffer(imgData), imgData.shape[1], imgData.shape[0], imgFormat)
else:
# mutable, but leaks memory
img = QtGui.QImage(memoryview(imgData), imgData.shape[1], imgData.shape[0], imgFormat)
img.data = imgData
return img
#try:
#buf = imgData.data
#except AttributeError: ## happens when image data is non-contiguous
#buf = imgData.data
#profiler()
#qimage = QtGui.QImage(buf, imgData.shape[1], imgData.shape[0], imgFormat)
#profiler()
#qimage.data = imgData
#return qimage
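## Illustrative usage sketch for makeQImage (hypothetical helper, not part of
## the original module): wrap a (w, h, 4) ubyte array (b,g,r,a order) in a
## QImage. Assumes numpy and the Qt binding imported above are available.
def _example_makeQImage():
    data = np.zeros((200, 100, 4), dtype=np.ubyte)
    data[..., 2] = 255  # fill the red channel (channel order is b, g, r, a)
    data[..., 3] = 255  # fully opaque alpha
    return makeQImage(data, alpha=True, copy=True, transpose=True)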
def imageToArray(img, copy=False, transpose=True):
"""
Convert a QImage into a numpy array. The image must have format RGB32, ARGB32, or ARGB32_Premultiplied.
By default, the image is not copied; changes made to the array will appear in the QImage as well (beware: if
the QImage is collected before the array, there may be trouble).
The array will have shape (width, height, (b,g,r,a)).
"""
fmt = img.format()
ptr = img.bits()
if USE_PYSIDE:
arr = np.frombuffer(ptr, dtype=np.ubyte)
else:
ptr.setsize(img.byteCount())
arr = np.asarray(ptr)
if img.byteCount() != arr.size * arr.itemsize:
# Required for Python 2.6, PyQt 4.10
# If this works on all platforms, then there is no need to use np.asarray..
arr = np.frombuffer(ptr, np.ubyte, img.byteCount())
if fmt == img.Format_RGB32:
arr = arr.reshape(img.height(), img.width(), 3)
elif fmt == img.Format_ARGB32 or fmt == img.Format_ARGB32_Premultiplied:
arr = arr.reshape(img.height(), img.width(), 4)
if copy:
arr = arr.copy()
if transpose:
return arr.transpose((1,0,2))
else:
return arr
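## Illustrative round-trip sketch (hypothetical helper, not part of the
## original module): convert an array to a QImage and read the pixels back.
def _example_imageToArray():
    data = np.random.randint(0, 255, size=(64, 32, 4)).astype(np.ubyte)
    img = makeQImage(data, alpha=True, copy=True, transpose=True)
    arr = imageToArray(img, copy=True, transpose=True)  # shape (64, 32, 4) again
    return arr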
def colorToAlpha(data, color):
"""
Given an RGBA image in *data*, convert *color* to be transparent.
*data* must be an array (w, h, 3 or 4) of ubyte values and *color* must be
an array (3) of ubyte values.
This is particularly useful for images that have a black or white background.
Algorithm is taken from Gimp's color-to-alpha function in plug-ins/common/colortoalpha.c
Credit:
/*
* Color To Alpha plug-in v1.0 by Seth Burgess, sjburges@gimp.org 1999/05/14
* with algorithm by clahey
*/
"""
data = data.astype(float)
if data.shape[-1] == 3: ## add alpha channel if needed
d2 = np.empty(data.shape[:2]+(4,), dtype=data.dtype)
d2[...,:3] = data
d2[...,3] = 255
data = d2
color = color.astype(float)
alpha = np.zeros(data.shape[:2]+(3,), dtype=float)
output = data.copy()
for i in [0,1,2]:
d = data[...,i]
c = color[i]
mask = d > c
alpha[...,i][mask] = (d[mask] - c) / (255. - c)
imask = d < c
alpha[...,i][imask] = (c - d[imask]) / c
output[...,3] = alpha.max(axis=2) * 255.
mask = output[...,3] >= 1.0 ## avoid zero division while processing alpha channel
correction = 255. / output[...,3][mask] ## increase value to compensate for decreased alpha
for i in [0,1,2]:
output[...,i][mask] = ((output[...,i][mask]-color[i]) * correction) + color[i]
output[...,3][mask] *= data[...,3][mask] / 255. ## combine computed and previous alpha values
#raise Exception()
return np.clip(output, 0, 255).astype(np.ubyte)
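## Illustrative usage sketch for colorToAlpha (hypothetical helper, not part
## of the original module): make a white background transparent.
def _example_colorToAlpha():
    img = np.full((16, 16, 3), 255, dtype=np.ubyte)  # all-white RGB image
    img[4:12, 4:12] = (200, 30, 30)                  # a colored square
    white = np.array([255, 255, 255], dtype=np.ubyte)
    return colorToAlpha(img, white)                  # RGBA; white pixels end up with alpha 0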
def gaussianFilter(data, sigma):
"""
Drop-in replacement for scipy.ndimage.gaussian_filter.
(note: results are only approximately equal to the output of
gaussian_filter)
"""
if np.isscalar(sigma):
sigma = (sigma,) * data.ndim
baseline = data.mean()
filtered = data - baseline
for ax in range(data.ndim):
s = sigma[ax]
if s == 0:
continue
# generate 1D gaussian kernel
ksize = int(s * 6)
x = np.arange(-ksize, ksize)
kernel = np.exp(-x**2 / (2*s**2))
kshape = [1,] * data.ndim
kshape[ax] = len(kernel)
kernel = kernel.reshape(kshape)
# convolve as product of FFTs
shape = data.shape[ax] + ksize
scale = 1.0 / (abs(s) * (2*np.pi)**0.5)
filtered = scale * np.fft.irfft(np.fft.rfft(filtered, shape, axis=ax) *
np.fft.rfft(kernel, shape, axis=ax),
axis=ax)
# clip off extra data
sl = [slice(None)] * data.ndim
sl[ax] = slice(filtered.shape[ax]-data.shape[ax],None,None)
filtered = filtered[tuple(sl)]  ## index with a tuple; indexing with a list is deprecated in numpy
return filtered + baseline
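## Illustrative usage sketch for gaussianFilter (hypothetical helper, not part
## of the original module): smooth a noisy 1D trace. Results only approximate
## scipy.ndimage.gaussian_filter, as noted in the docstring.
def _example_gaussianFilter():
    x = np.linspace(0, 10, 500)
    noisy = np.sin(x) + np.random.normal(scale=0.3, size=x.shape)
    return gaussianFilter(noisy, sigma=5)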
def downsample(data, n, axis=0, xvals='subsample'):
"""Downsample by averaging points together across axis.
If multiple axes are specified, runs once per axis.
If a metaArray is given, then the axis values can be either subsampled
or downsampled to match.
"""
ma = None
if (hasattr(data, 'implements') and data.implements('MetaArray')):
ma = data
data = data.view(np.ndarray)
if hasattr(axis, '__len__'):
if not hasattr(n, '__len__'):
n = [n]*len(axis)
for i in range(len(axis)):
data = downsample(data, n[i], axis[i])
return data
if n <= 1:
return data
nPts = int(data.shape[axis] / n)
s = list(data.shape)
s[axis] = nPts
s.insert(axis+1, n)
sl = [slice(None)] * data.ndim
sl[axis] = slice(0, nPts*n)
d1 = data[tuple(sl)]
#print d1.shape, s
d1.shape = tuple(s)
d2 = d1.mean(axis+1)
if ma is None:
return d2
else:
info = ma.infoCopy()
if 'values' in info[axis]:
if xvals == 'subsample':
info[axis]['values'] = info[axis]['values'][::n][:nPts]
elif xvals == 'downsample':
info[axis]['values'] = downsample(info[axis]['values'], n)
return MetaArray(d2, info=info)
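## Illustrative usage sketch for downsample (hypothetical helper, not part of
## the original module): average every 10 consecutive samples along axis 0.
def _example_downsample():
    data = np.arange(1000, dtype=float)
    return downsample(data, 10, axis=0)  # shape (100,); each value is a 10-point mean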
def arrayToQPath(x, y, connect='all'):
"""Convert an array of x,y coordinats to QPainterPath as efficiently as possible.
The *connect* argument may be 'all', indicating that each point should be
connected to the next; 'pairs', indicating that each pair of points
should be connected, or an array of int32 values (0 or 1) indicating
connections.
"""
## Create all vertices in path. The method used below creates a binary format so that all
## vertices can be read in at once. This binary format may change in future versions of Qt,
## so the original (slower) method is left here for emergencies:
#path.moveTo(x[0], y[0])
#if connect == 'all':
#for i in range(1, y.shape[0]):
#path.lineTo(x[i], y[i])
#elif connect == 'pairs':
#for i in range(1, y.shape[0]):
#if i%2 == 0:
#path.lineTo(x[i], y[i])
#else:
#path.moveTo(x[i], y[i])
#elif isinstance(connect, np.ndarray):
#for i in range(1, y.shape[0]):
#if connect[i] == 1:
#path.lineTo(x[i], y[i])
#else:
#path.moveTo(x[i], y[i])
#else:
#raise Exception('connect argument must be "all", "pairs", or array')
## Speed this up using >> operator
## Format is:
## numVerts(i4) 0(i4)
## x(f8) y(f8) 0(i4) <-- 0 means this vertex does not connect
## x(f8) y(f8) 1(i4) <-- 1 means this vertex connects to the previous vertex
## ...
## 0(i4)
##
## All values are big endian--pack using struct.pack('>d') or struct.pack('>i')
path = QtGui.QPainterPath()
#profiler = debug.Profiler()
n = x.shape[0]
# create empty array, pad with extra space on either end
arr = np.empty(n+2, dtype=[('x', '>f8'), ('y', '>f8'), ('c', '>i4')])
# write first two integers
#profiler('allocate empty')
byteview = arr.view(dtype=np.ubyte)
byteview[:12] = 0
byteview.data[12:20] = struct.pack('>ii', n, 0)
#profiler('pack header')
# Fill array with vertex values
arr[1:-1]['x'] = x
arr[1:-1]['y'] = y
# decide which points are connected by lines
if eq(connect, 'all'):
arr[1:-1]['c'] = 1
elif eq(connect, 'pairs'):
arr[1:-1]['c'][::2] = 1
arr[1:-1]['c'][1::2] = 0
elif eq(connect, 'finite'):
arr[1:-1]['c'] = np.isfinite(x) & np.isfinite(y)
elif isinstance(connect, np.ndarray):
arr[1:-1]['c'] = connect
else:
raise Exception('connect argument must be "all", "pairs", "finite", or array')
#profiler('fill array')
# write last 0
lastInd = 20*(n+1)
byteview.data[lastInd:lastInd+4] = struct.pack('>i', 0)
#profiler('footer')
# create datastream object and stream into path
## Avoiding this method because QByteArray(str) leaks memory in PySide
#buf = QtCore.QByteArray(arr.data[12:lastInd+4]) # I think one unnecessary copy happens here
path.strn = byteview.data[12:lastInd+4] # make sure data doesn't run away
try:
buf = QtCore.QByteArray.fromRawData(path.strn)
except TypeError:
buf = QtCore.QByteArray(bytes(path.strn))
#profiler('create buffer')
ds = QtCore.QDataStream(buf)
ds >> path
#profiler('load')
return path
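## Illustrative usage sketch for arrayToQPath (hypothetical helper, not part
## of the original module): build a QPainterPath for a sine curve with every
## point connected to the next. Assumes a Qt binding is available.
def _example_arrayToQPath():
    x = np.linspace(0, 10, 200)
    y = np.sin(x)
    return arrayToQPath(x, y, connect='all')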
#def isosurface(data, level):
#"""
#Generate isosurface from volumetric data using marching tetrahedra algorithm.
#See Paul Bourke, "Polygonising a Scalar Field Using Tetrahedrons" (http://local.wasp.uwa.edu.au/~pbourke/geometry/polygonise/)
#*data* 3D numpy array of scalar values
#*level* The level at which to generate an isosurface
#"""
#facets = []
### mark everything below the isosurface level
#mask = data < level
#### make eight sub-fields
#fields = np.empty((2,2,2), dtype=object)
#slices = [slice(0,-1), slice(1,None)]
#for i in [0,1]:
#for j in [0,1]:
#for k in [0,1]:
#fields[i,j,k] = mask[slices[i], slices[j], slices[k]]
### split each cell into 6 tetrahedra
### these all have the same 'orienation'; points 1,2,3 circle
### clockwise around point 0
#tetrahedra = [
#[(0,1,0), (1,1,1), (0,1,1), (1,0,1)],
#[(0,1,0), (0,1,1), (0,0,1), (1,0,1)],
#[(0,1,0), (0,0,1), (0,0,0), (1,0,1)],
#[(0,1,0), (0,0,0), (1,0,0), (1,0,1)],
#[(0,1,0), (1,0,0), (1,1,0), (1,0,1)],
#[(0,1,0), (1,1,0), (1,1,1), (1,0,1)]
#]
### each tetrahedron will be assigned an index
### which determines how to generate its facets.
### this structure is:
### facets[index][facet1, facet2, ...]
### where each facet is triangular and its points are each
### interpolated between two points on the tetrahedron
### facet = [(p1a, p1b), (p2a, p2b), (p3a, p3b)]
### facet points always circle clockwise if you are looking
### at them from below the isosurface.
#indexFacets = [
#[], ## all above
#[[(0,1), (0,2), (0,3)]], # 0 below
#[[(1,0), (1,3), (1,2)]], # 1 below
#[[(0,2), (1,3), (1,2)], [(0,2), (0,3), (1,3)]], # 0,1 below
#[[(2,0), (2,1), (2,3)]], # 2 below
#[[(0,3), (1,2), (2,3)], [(0,3), (0,1), (1,2)]], # 0,2 below
#[[(1,0), (2,3), (2,0)], [(1,0), (1,3), (2,3)]], # 1,2 below
#[[(3,0), (3,1), (3,2)]], # 3 above
#[[(3,0), (3,2), (3,1)]], # 3 below
#[[(1,0), (2,0), (2,3)], [(1,0), (2,3), (1,3)]], # 0,3 below
#[[(0,3), (2,3), (1,2)], [(0,3), (1,2), (0,1)]], # 1,3 below
#[[(2,0), (2,3), (2,1)]], # 0,1,3 below
#[[(0,2), (1,2), (1,3)], [(0,2), (1,3), (0,3)]], # 2,3 below
#[[(1,0), (1,2), (1,3)]], # 0,2,3 below
#[[(0,1), (0,3), (0,2)]], # 1,2,3 below
#[] ## all below
#]
#for tet in tetrahedra:
### get the 4 fields for this tetrahedron
#tetFields = [fields[c] for c in tet]
### generate an index for each grid cell
#index = tetFields[0] + tetFields[1]*2 + tetFields[2]*4 + tetFields[3]*8
### add facets
#for i in xrange(index.shape[0]): # data x-axis
#for j in xrange(index.shape[1]): # data y-axis
#for k in xrange(index.shape[2]): # data z-axis
#for f in indexFacets[index[i,j,k]]: # faces to generate for this tet
#pts = []
#for l in [0,1,2]: # points in this face
#p1 = tet[f[l][0]] # tet corner 1
#p2 = tet[f[l][1]] # tet corner 2
#pts.append([(p1[x]+p2[x])*0.5+[i,j,k][x]+0.5 for x in [0,1,2]]) ## interpolate between tet corners
#facets.append(pts)
#return facets
def isocurve(data, level, connected=False, extendToEdge=False, path=False):
"""
Generate isocurve from 2D data using marching squares algorithm.
============== =========================================================
**Arguments:**
data 2D numpy array of scalar values
level The level at which to generate an isosurface
connected If False, return a single long list of point pairs
If True, return multiple long lists of connected point
locations. (This is slower but better for drawing
continuous lines)
extendToEdge If True, extend the curves to reach the exact edges of
the data.
path if True, return a QPainterPath rather than a list of
vertex coordinates. This forces connected=True.
============== =========================================================
This function is SLOW; plenty of room for optimization here.
"""
if path is True:
connected = True
if extendToEdge:
d2 = np.empty((data.shape[0]+2, data.shape[1]+2), dtype=data.dtype)
d2[1:-1, 1:-1] = data
d2[0, 1:-1] = data[0]
d2[-1, 1:-1] = data[-1]
d2[1:-1, 0] = data[:, 0]
d2[1:-1, -1] = data[:, -1]
d2[0,0] = d2[0,1]
d2[0,-1] = d2[1,-1]
d2[-1,0] = d2[-1,1]
d2[-1,-1] = d2[-1,-2]
data = d2
sideTable = [
[],
[0,1],
[1,2],
[0,2],
[0,3],
[1,3],
[0,1,2,3],
[2,3],
[2,3],
[0,1,2,3],
[1,3],
[0,3],
[0,2],
[1,2],
[0,1],
[]
]
edgeKey=[
[(0,1), (0,0)],
[(0,0), (1,0)],
[(1,0), (1,1)],
[(1,1), (0,1)]
]
lines = []
## mark everything below the isosurface level
mask = data < level
### make four sub-fields and compute indexes for grid cells
index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)
fields = np.empty((2,2), dtype=object)
slices = [slice(0,-1), slice(1,None)]
for i in [0,1]:
for j in [0,1]:
fields[i,j] = mask[slices[i], slices[j]]
#vertIndex = i - 2*j*i + 3*j + 4*k ## this is just to match Bourk's vertex numbering scheme
vertIndex = i+2*j
#print i,j,k," : ", fields[i,j,k], 2**vertIndex
np.add(index, fields[i,j] * 2**vertIndex, out=index, casting='unsafe')
#print index
#print index
## add lines
for i in range(index.shape[0]): # data x-axis
for j in range(index.shape[1]): # data y-axis
sides = sideTable[index[i,j]]
for l in range(0, len(sides), 2): ## faces for this grid cell
edges = sides[l:l+2]
pts = []
for m in [0,1]: # points in this face
p1 = edgeKey[edges[m]][0] # p1, p2 are points at either side of an edge
p2 = edgeKey[edges[m]][1]
v1 = data[i+p1[0], j+p1[1]] # v1 and v2 are the values at p1 and p2
v2 = data[i+p2[0], j+p2[1]]
f = (level-v1) / (v2-v1)
fi = 1.0 - f
p = ( ## interpolate between corners
p1[0]*fi + p2[0]*f + i + 0.5,
p1[1]*fi + p2[1]*f + j + 0.5
)
if extendToEdge:
## check bounds
p = (
min(data.shape[0]-2, max(0, p[0]-1)),
min(data.shape[1]-2, max(0, p[1]-1)),
)
if connected:
gridKey = i + (1 if edges[m]==2 else 0), j + (1 if edges[m]==3 else 0), edges[m]%2
pts.append((p, gridKey)) ## give the actual position and a key identifying the grid location (for connecting segments)
else:
pts.append(p)
lines.append(pts)
if not connected:
return lines
## turn disjoint list of segments into continuous lines
#lines = [[2,5], [5,4], [3,4], [1,3], [6,7], [7,8], [8,6], [11,12], [12,15], [11,13], [13,14]]
#lines = [[(float(a), a), (float(b), b)] for a,b in lines]
points = {} ## maps each point to its connections
for a,b in lines:
if a[1] not in points:
points[a[1]] = []
points[a[1]].append([a,b])
if b[1] not in points:
points[b[1]] = []
points[b[1]].append([b,a])
## rearrange into chains
for k in list(points.keys()):
try:
chains = points[k]
except KeyError: ## already used this point elsewhere
continue
#print "===========", k
for chain in chains:
#print " chain:", chain
x = None
while True:
if x == chain[-1][1]:
break ## nothing left to do on this chain
x = chain[-1][1]
if x == k:
break ## chain has looped; we're done and can ignore the opposite chain
y = chain[-2][1]
connects = points[x]
for conn in connects[:]:
if conn[1][1] != y:
#print " ext:", conn
chain.extend(conn[1:])
#print " del:", x
del points[x]
if chain[0][1] == chain[-1][1]: # looped chain; no need to continue the other direction
chains.pop()
break
## extract point locations
lines = []
for chain in points.values():
if len(chain) == 2:
chain = chain[1][1:][::-1] + chain[0] # join together ends of chain
else:
chain = chain[0]
lines.append([p[0] for p in chain])
if not path:
return lines ## a list of pairs of points
path = QtGui.QPainterPath()
for line in lines:
path.moveTo(*line[0])
for p in line[1:]:
path.lineTo(*p)
return path
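## Illustrative usage sketch for isocurve (hypothetical helper, not part of
## the original module): extract the 0.5 level set of a radial 2D field as
## connected curves.
def _example_isocurve():
    y, x = np.mgrid[-1:1:50j, -1:1:50j]
    field = np.exp(-(x**2 + y**2))
    return isocurve(field, 0.5, connected=True)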
def traceImage(image, values, smooth=0.5):
"""
Convert an image to a set of QPainterPath curves.
One curve will be generated for each item in *values*; each curve outlines the area
of the image that is closer to its value than to any others.
If image is RGB or RGBA, then the shape of values should be (nvals, 3/4)
The parameter *smooth* is expressed in pixels.
"""
try:
import scipy.ndimage as ndi
except ImportError:
raise Exception("traceImage() requires the package scipy.ndimage, but it is not importable.")
if values.ndim == 2:
values = values.T
values = values[np.newaxis, np.newaxis, ...].astype(float)
image = image[..., np.newaxis].astype(float)
diff = np.abs(image-values)
if values.ndim == 4:
diff = diff.sum(axis=2)
labels = np.argmin(diff, axis=2)
paths = []
for i in range(diff.shape[-1]):
d = (labels==i).astype(float)
d = gaussianFilter(d, (smooth, smooth))
lines = isocurve(d, 0.5, connected=True, extendToEdge=True)
path = QtGui.QPainterPath()
for line in lines:
path.moveTo(*line[0])
for p in line[1:]:
path.lineTo(*p)
paths.append(path)
return paths
IsosurfaceDataCache = None
def isosurface(data, level):
"""
Generate isosurface from volumetric data using marching cubes algorithm.
See Paul Bourke, "Polygonising a Scalar Field"
(http://paulbourke.net/geometry/polygonise/)
*data* 3D numpy array of scalar values
*level* The level at which to generate an isosurface
Returns an array of vertex coordinates (Nv, 3) and an array of
per-face vertex indexes (Nf, 3)
"""
## For improvement, see:
##
## Efficient implementation of Marching Cubes' cases with topological guarantees.
## Thomas Lewiner, Helio Lopes, Antonio Wilson Vieira and Geovan Tavares.
## Journal of Graphics Tools 8(2): pp. 1-15 (december 2003)
## Precompute lookup tables on the first run
global IsosurfaceDataCache
if IsosurfaceDataCache is None:
## map from grid cell index to edge index.
## grid cell index tells us which corners are below the isosurface,
## edge index tells us which edges are cut by the isosurface.
## (Data stolen from Bourke; see above.)
edgeTable = np.array([
0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0
], dtype=np.uint16)
## Table of triangles to use for filling each grid cell.
## Each set of three integers tells us which three edges to
## draw a triangle between.
## (Data stolen from Bourke; see above.)
triTable = [
[],
[0, 8, 3],
[0, 1, 9],
[1, 8, 3, 9, 8, 1],
[1, 2, 10],
[0, 8, 3, 1, 2, 10],
[9, 2, 10, 0, 2, 9],
[2, 8, 3, 2, 10, 8, 10, 9, 8],
[3, 11, 2],
[0, 11, 2, 8, 11, 0],
[1, 9, 0, 2, 3, 11],
[1, 11, 2, 1, 9, 11, 9, 8, 11],
[3, 10, 1, 11, 10, 3],
[0, 10, 1, 0, 8, 10, 8, 11, 10],
[3, 9, 0, 3, 11, 9, 11, 10, 9],
[9, 8, 10, 10, 8, 11],
[4, 7, 8],
[4, 3, 0, 7, 3, 4],
[0, 1, 9, 8, 4, 7],
[4, 1, 9, 4, 7, 1, 7, 3, 1],
[1, 2, 10, 8, 4, 7],
[3, 4, 7, 3, 0, 4, 1, 2, 10],
[9, 2, 10, 9, 0, 2, 8, 4, 7],
[2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4],
[8, 4, 7, 3, 11, 2],
[11, 4, 7, 11, 2, 4, 2, 0, 4],
[9, 0, 1, 8, 4, 7, 2, 3, 11],
[4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1],
[3, 10, 1, 3, 11, 10, 7, 8, 4],
[1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4],
[4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3],
[4, 7, 11, 4, 11, 9, 9, 11, 10],
[9, 5, 4],
[9, 5, 4, 0, 8, 3],
[0, 5, 4, 1, 5, 0],
[8, 5, 4, 8, 3, 5, 3, 1, 5],
[1, 2, 10, 9, 5, 4],
[3, 0, 8, 1, 2, 10, 4, 9, 5],
[5, 2, 10, 5, 4, 2, 4, 0, 2],
[2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8],
[9, 5, 4, 2, 3, 11],
[0, 11, 2, 0, 8, 11, 4, 9, 5],
[0, 5, 4, 0, 1, 5, 2, 3, 11],
[2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5],
[10, 3, 11, 10, 1, 3, 9, 5, 4],
[4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10],
[5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3],
[5, 4, 8, 5, 8, 10, 10, 8, 11],
[9, 7, 8, 5, 7, 9],
[9, 3, 0, 9, 5, 3, 5, 7, 3],
[0, 7, 8, 0, 1, 7, 1, 5, 7],
[1, 5, 3, 3, 5, 7],
[9, 7, 8, 9, 5, 7, 10, 1, 2],
[10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3],
[8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2],
[2, 10, 5, 2, 5, 3, 3, 5, 7],
[7, 9, 5, 7, 8, 9, 3, 11, 2],
[9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11],
[2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7],
[11, 2, 1, 11, 1, 7, 7, 1, 5],
[9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11],
[5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0],
[11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0],
[11, 10, 5, 7, 11, 5],
[10, 6, 5],
[0, 8, 3, 5, 10, 6],
[9, 0, 1, 5, 10, 6],
[1, 8, 3, 1, 9, 8, 5, 10, 6],
[1, 6, 5, 2, 6, 1],
[1, 6, 5, 1, 2, 6, 3, 0, 8],
[9, 6, 5, 9, 0, 6, 0, 2, 6],
[5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8],
[2, 3, 11, 10, 6, 5],
[11, 0, 8, 11, 2, 0, 10, 6, 5],
[0, 1, 9, 2, 3, 11, 5, 10, 6],
[5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11],
[6, 3, 11, 6, 5, 3, 5, 1, 3],
[0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6],
[3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9],
[6, 5, 9, 6, 9, 11, 11, 9, 8],
[5, 10, 6, 4, 7, 8],
[4, 3, 0, 4, 7, 3, 6, 5, 10],
[1, 9, 0, 5, 10, 6, 8, 4, 7],
[10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4],
[6, 1, 2, 6, 5, 1, 4, 7, 8],
[1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7],
[8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6],
[7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9],
[3, 11, 2, 7, 8, 4, 10, 6, 5],
[5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11],
[0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6],
[9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6],
[8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6],
[5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11],
[0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7],
[6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9],
[10, 4, 9, 6, 4, 10],
[4, 10, 6, 4, 9, 10, 0, 8, 3],
[10, 0, 1, 10, 6, 0, 6, 4, 0],
[8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10],
[1, 4, 9, 1, 2, 4, 2, 6, 4],
[3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4],
[0, 2, 4, 4, 2, 6],
[8, 3, 2, 8, 2, 4, 4, 2, 6],
[10, 4, 9, 10, 6, 4, 11, 2, 3],
[0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6],
[3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10],
[6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1],
[9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3],
[8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1],
[3, 11, 6, 3, 6, 0, 0, 6, 4],
[6, 4, 8, 11, 6, 8],
[7, 10, 6, 7, 8, 10, 8, 9, 10],
[0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10],
[10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0],
[10, 6, 7, 10, 7, 1, 1, 7, 3],
[1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7],
[2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9],
[7, 8, 0, 7, 0, 6, 6, 0, 2],
[7, 3, 2, 6, 7, 2],
[2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7],
[2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7],
[1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11],
[11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1],
[8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6],
[0, 9, 1, 11, 6, 7],
[7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0],
[7, 11, 6],
[7, 6, 11],
[3, 0, 8, 11, 7, 6],
[0, 1, 9, 11, 7, 6],
[8, 1, 9, 8, 3, 1, 11, 7, 6],
[10, 1, 2, 6, 11, 7],
[1, 2, 10, 3, 0, 8, 6, 11, 7],
[2, 9, 0, 2, 10, 9, 6, 11, 7],
[6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8],
[7, 2, 3, 6, 2, 7],
[7, 0, 8, 7, 6, 0, 6, 2, 0],
[2, 7, 6, 2, 3, 7, 0, 1, 9],
[1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6],
[10, 7, 6, 10, 1, 7, 1, 3, 7],
[10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8],
[0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7],
[7, 6, 10, 7, 10, 8, 8, 10, 9],
[6, 8, 4, 11, 8, 6],
[3, 6, 11, 3, 0, 6, 0, 4, 6],
[8, 6, 11, 8, 4, 6, 9, 0, 1],
[9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6],
[6, 8, 4, 6, 11, 8, 2, 10, 1],
[1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6],
[4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9],
[10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3],
[8, 2, 3, 8, 4, 2, 4, 6, 2],
[0, 4, 2, 4, 6, 2],
[1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8],
[1, 9, 4, 1, 4, 2, 2, 4, 6],
[8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1],
[10, 1, 0, 10, 0, 6, 6, 0, 4],
[4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3],
[10, 9, 4, 6, 10, 4],
[4, 9, 5, 7, 6, 11],
[0, 8, 3, 4, 9, 5, 11, 7, 6],
[5, 0, 1, 5, 4, 0, 7, 6, 11],
[11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5],
[9, 5, 4, 10, 1, 2, 7, 6, 11],
[6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5],
[7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2],
[3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6],
[7, 2, 3, 7, 6, 2, 5, 4, 9],
[9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7],
[3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0],
[6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8],
[9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7],
[1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4],
[4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10],
[7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10],
[6, 9, 5, 6, 11, 9, 11, 8, 9],
[3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5],
[0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11],
[6, 11, 3, 6, 3, 5, 5, 3, 1],
[1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6],
[0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10],
[11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5],
[6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3],
[5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2],
[9, 5, 6, 9, 6, 0, 0, 6, 2],
[1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8],
[1, 5, 6, 2, 1, 6],
[1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6],
[10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0],
[0, 3, 8, 5, 6, 10],
[10, 5, 6],
[11, 5, 10, 7, 5, 11],
[11, 5, 10, 11, 7, 5, 8, 3, 0],
[5, 11, 7, 5, 10, 11, 1, 9, 0],
[10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1],
[11, 1, 2, 11, 7, 1, 7, 5, 1],
[0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11],
[9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7],
[7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2],
[2, 5, 10, 2, 3, 5, 3, 7, 5],
[8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5],
[9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2],
[9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2],
[1, 3, 5, 3, 7, 5],
[0, 8, 7, 0, 7, 1, 1, 7, 5],
[9, 0, 3, 9, 3, 5, 5, 3, 7],
[9, 8, 7, 5, 9, 7],
[5, 8, 4, 5, 10, 8, 10, 11, 8],
[5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0],
[0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5],
[10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4],
[2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8],
[0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11],
[0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5],
[9, 4, 5, 2, 11, 3],
[2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4],
[5, 10, 2, 5, 2, 4, 4, 2, 0],
[3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9],
[5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2],
[8, 4, 5, 8, 5, 3, 3, 5, 1],
[0, 4, 5, 1, 0, 5],
[8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5],
[9, 4, 5],
[4, 11, 7, 4, 9, 11, 9, 10, 11],
[0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11],
[1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11],
[3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4],
[4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2],
[9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3],
[11, 7, 4, 11, 4, 2, 2, 4, 0],
[11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4],
[2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9],
[9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7],
[3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10],
[1, 10, 2, 8, 7, 4],
[4, 9, 1, 4, 1, 7, 7, 1, 3],
[4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1],
[4, 0, 3, 7, 4, 3],
[4, 8, 7],
[9, 10, 8, 10, 11, 8],
[3, 0, 9, 3, 9, 11, 11, 9, 10],
[0, 1, 10, 0, 10, 8, 8, 10, 11],
[3, 1, 10, 11, 3, 10],
[1, 2, 11, 1, 11, 9, 9, 11, 8],
[3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9],
[0, 2, 11, 8, 0, 11],
[3, 2, 11],
[2, 3, 8, 2, 8, 10, 10, 8, 9],
[9, 10, 2, 0, 9, 2],
[2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8],
[1, 10, 2],
[1, 3, 8, 9, 1, 8],
[0, 9, 1],
[0, 3, 8],
[]
]
edgeShifts = np.array([ ## maps edge ID (0-11) to (x,y,z) cell offset and edge ID (0-2)
[0, 0, 0, 0],
[1, 0, 0, 1],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 1, 1],
[0, 1, 1, 0],
[0, 0, 1, 1],
[0, 0, 0, 2],
[1, 0, 0, 2],
[1, 1, 0, 2],
[0, 1, 0, 2],
#[9, 9, 9, 9] ## fake
], dtype=np.uint16) # don't use ubyte here! This value gets added to cell index later; will need the extra precision.
nTableFaces = np.array([len(f)/3 for f in triTable], dtype=np.ubyte)
faceShiftTables = [None]
for i in range(1,6):
## compute lookup table of index: vertexes mapping
faceTableI = np.zeros((len(triTable), i*3), dtype=np.ubyte)
faceTableInds = np.argwhere(nTableFaces == i)
faceTableI[faceTableInds[:,0]] = np.array([triTable[j] for j in faceTableInds])
faceTableI = faceTableI.reshape((len(triTable), i, 3))
faceShiftTables.append(edgeShifts[faceTableI])
## Let's try something different:
#faceTable = np.empty((256, 5, 3, 4), dtype=np.ubyte) # (grid cell index, faces, vertexes, edge lookup)
#for i,f in enumerate(triTable):
#f = np.array(f + [12] * (15-len(f))).reshape(5,3)
#faceTable[i] = edgeShifts[f]
IsosurfaceDataCache = (faceShiftTables, edgeShifts, edgeTable, nTableFaces)
else:
faceShiftTables, edgeShifts, edgeTable, nTableFaces = IsosurfaceDataCache
## mark everything below the isosurface level
mask = data < level
### make eight sub-fields and compute indexes for grid cells
index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)
fields = np.empty((2,2,2), dtype=object)
slices = [slice(0,-1), slice(1,None)]
for i in [0,1]:
for j in [0,1]:
for k in [0,1]:
fields[i,j,k] = mask[slices[i], slices[j], slices[k]]
vertIndex = i - 2*j*i + 3*j + 4*k ## this is just to match Bourke's vertex numbering scheme
np.add(index, fields[i,j,k] * 2**vertIndex, out=index, casting='unsafe')
### Generate table of edges that have been cut
cutEdges = np.zeros([x+1 for x in index.shape]+[3], dtype=np.uint32)
edges = edgeTable[index]
for i, shift in enumerate(edgeShifts[:12]):
slices = [slice(shift[j],cutEdges.shape[j]+(shift[j]-1)) for j in range(3)]
cutEdges[slices[0], slices[1], slices[2], shift[3]] += edges & 2**i
## for each cut edge, interpolate to see where exactly the edge is cut and generate vertex positions
m = cutEdges > 0
vertexInds = np.argwhere(m) ## argwhere is slow!
vertexes = vertexInds[:,:3].astype(np.float32)
dataFlat = data.reshape(data.shape[0]*data.shape[1]*data.shape[2])
## re-use the cutEdges array as a lookup table for vertex IDs
cutEdges[vertexInds[:,0], vertexInds[:,1], vertexInds[:,2], vertexInds[:,3]] = np.arange(vertexInds.shape[0])
for i in [0,1,2]:
vim = vertexInds[:,3] == i
vi = vertexInds[vim, :3]
viFlat = (vi * (np.array(data.strides[:3]) // data.itemsize)[np.newaxis,:]).sum(axis=1)
v1 = dataFlat[viFlat]
v2 = dataFlat[viFlat + data.strides[i]//data.itemsize]
vertexes[vim,i] += (level-v1) / (v2-v1)
### compute the set of vertex indexes for each face.
## This works, but runs a bit slower.
#cells = np.argwhere((index != 0) & (index != 255)) ## all cells with at least one face
#cellInds = index[cells[:,0], cells[:,1], cells[:,2]]
#verts = faceTable[cellInds]
#mask = verts[...,0,0] != 9
#verts[...,:3] += cells[:,np.newaxis,np.newaxis,:] ## we now have indexes into cutEdges
#verts = verts[mask]
#faces = cutEdges[verts[...,0], verts[...,1], verts[...,2], verts[...,3]] ## and these are the vertex indexes we want.
## To allow this to be vectorized efficiently, we count the number of faces in each
## grid cell and handle each group of cells with the same number together.
## determine how many faces to assign to each grid cell
nFaces = nTableFaces[index]
totFaces = nFaces.sum()
faces = np.empty((totFaces, 3), dtype=np.uint32)
ptr = 0
#import debug
#p = debug.Profiler()
## this helps speed up an indexing operation later on
cs = np.array(cutEdges.strides)//cutEdges.itemsize
cutEdges = cutEdges.flatten()
## this, strangely, does not seem to help.
#ins = np.array(index.strides)/index.itemsize
#index = index.flatten()
for i in range(1,6):
### expensive:
#profiler()
cells = np.argwhere(nFaces == i) ## all cells which require i faces (argwhere is expensive)
#profiler()
if cells.shape[0] == 0:
continue
cellInds = index[cells[:,0], cells[:,1], cells[:,2]] ## index values of cells to process for this round
#profiler()
### expensive:
verts = faceShiftTables[i][cellInds]
#profiler()
np.add(verts[...,:3], cells[:,np.newaxis,np.newaxis,:], out=verts[...,:3], casting='unsafe') ## we now have indexes into cutEdges
verts = verts.reshape((verts.shape[0]*i,)+verts.shape[2:])
#profiler()
### expensive:
verts = (verts * cs[np.newaxis, np.newaxis, :]).sum(axis=2)
vertInds = cutEdges[verts]
#profiler()
nv = vertInds.shape[0]
#profiler()
faces[ptr:ptr+nv] = vertInds #.reshape((nv, 3))
#profiler()
ptr += nv
return vertexes, faces
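## Illustrative usage sketch for isosurface (hypothetical helper, not part of
## the original module): extract an approximate sphere from a radial field.
def _example_isosurface():
    z, y, x = np.mgrid[-1:1:30j, -1:1:30j, -1:1:30j]
    field = x**2 + y**2 + z**2
    verts, faces = isosurface(field, level=0.5)  # (Nv, 3) vertices, (Nf, 3) face indexes
    return verts, faces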
def invertQTransform(tr):
"""Return a QTransform that is the inverse of *tr*.
Raises an exception if tr is not invertible.
Note that this function is preferred over QTransform.inverted() due to
bugs in that method. (specifically, Qt has floating-point precision issues
when determining whether a matrix is invertible)
"""
try:
import numpy.linalg
arr = np.array([[tr.m11(), tr.m12(), tr.m13()], [tr.m21(), tr.m22(), tr.m23()], [tr.m31(), tr.m32(), tr.m33()]])
inv = numpy.linalg.inv(arr)
return QtGui.QTransform(inv[0,0], inv[0,1], inv[0,2], inv[1,0], inv[1,1], inv[1,2], inv[2,0], inv[2,1])
except ImportError:
inv = tr.inverted()
if inv[1] is False:
raise Exception("Transform is not invertible.")
return inv[0]
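## Illustrative usage sketch for invertQTransform (hypothetical helper, not
## part of the original module): invert a simple scale + translate transform.
## Assumes a Qt binding is available.
def _example_invertQTransform():
    tr = QtGui.QTransform()
    tr.scale(2.0, 0.5)
    tr.translate(10, -5)
    return invertQTransform(tr)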
def pseudoScatter(data, spacing=None, shuffle=True, bidir=False):
"""
Used for examining the distribution of values in a set. Produces scattering as in beeswarm or column scatter plots.
Given a list of x-values, construct a set of y-values such that an x,y scatter-plot
will not have overlapping points (it will look similar to a histogram).
"""
"""
inds = np.arange(len(data))
if shuffle:
np.random.shuffle(inds)
data = data[inds]
if spacing is None:
spacing = 2.*np.std(data)/len(data)**0.5
s2 = spacing**2
yvals = np.empty(len(data))
if len(data) == 0:
return yvals
yvals[0] = 0
for i in range(1,len(data)):
x = data[i] # current x value to be placed
x0 = data[:i] # all x values already placed
y0 = yvals[:i] # all y values already placed
y = 0
dx = (x0-x)**2 # x-distance to each previous point
xmask = dx < s2 # exclude anything too far away
if xmask.sum() > 0:
if bidir:
dirs = [-1, 1]
else:
dirs = [1]
yopts = []
for direction in dirs:
y = 0
dx2 = dx[xmask]
dy = (s2 - dx2)**0.5
limits = np.empty((2,len(dy))) # ranges of y-values to exclude
limits[0] = y0[xmask] - dy
limits[1] = y0[xmask] + dy
while True:
# ignore anything below this y-value
if direction > 0:
mask = limits[1] >= y
else:
mask = limits[0] <= y
limits2 = limits[:,mask]
# are we inside an excluded region?
mask = (limits2[0] < y) & (limits2[1] > y)
if mask.sum() == 0:
break
if direction > 0:
y = limits2[:,mask].max()
else:
y = limits2[:,mask].min()
yopts.append(y)
if bidir:
y = yopts[0] if -yopts[0] < yopts[1] else yopts[1]
else:
y = yopts[0]
yvals[i] = y
return yvals[np.argsort(inds)] ## un-shuffle values before returning
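## Illustrative usage sketch for pseudoScatter (hypothetical helper, not part
## of the original module): spread overlapping x-values apart so a scatter
## plot resembles a beeswarm / column scatter plot.
def _example_pseudoScatter():
    vals = np.random.normal(size=300)
    offsets = pseudoScatter(vals, bidir=True)
    # typical use (sketch): plot(offsets, vals, pen=None, symbol='o')
    return offsets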
def toposort(deps, nodes=None, seen=None, stack=None, depth=0):
"""Topological sort. Arguments are:
deps dictionary describing dependencies where a:[b,c] means "a depends on b and c"
nodes optional, specifies list of starting nodes (these should be the nodes
which are not depended on by any other nodes). Other candidate starting
nodes will be ignored.
Example::
# Sort the following graph:
#
# B ──┬─────> C <── D
# │ │
# E <─┴─> A <─┘
#
deps = {'a': ['b', 'c'], 'c': ['b', 'd'], 'e': ['b']}
toposort(deps)
=> ['b', 'd', 'c', 'a', 'e']
"""
# fill in empty dep lists
deps = deps.copy()
for k,v in list(deps.items()):
for k in v:
if k not in deps:
deps[k] = []
if nodes is None:
## run through deps to find nodes that are not depended upon
rem = set()
for dep in deps.values():
rem |= set(dep)
nodes = set(deps.keys()) - rem
if seen is None:
seen = set()
stack = []
sorted = []
for n in nodes:
if n in stack:
raise Exception("Cyclic dependency detected", stack + [n])
if n in seen:
continue
seen.add(n)
sorted.extend( toposort(deps, deps[n], seen, stack+[n], depth=depth+1))
sorted.append(n)
return sorted
| mpvismer/pyqtgraph | pyqtgraph/functions.py | Python | mit | 88,140 | ["Gaussian"] | 52fcea0d0fd60005be9db11230a39776293788a243308dbc97bb72a81de536b6 |
"""
=============================
Generic SpectralModel wrapper
=============================
.. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com>
"""
import numpy as np
from pyspeckit.mpfit import mpfit,mpfitException
from pyspeckit.spectrum.parinfo import ParinfoList,Parinfo
import copy
from astropy import log
import matplotlib.cbook as mpcb
from . import fitter
from . import mpfit_messages
from pyspeckit.specwarnings import warn
try:
    from collections import OrderedDict
except ImportError:
    try:
        from ordereddict import OrderedDict
    except ImportError:
        warn( "OrderedDict is required for modeling. If you have python <2.7, install the ordereddict module." )
class SpectralModel(fitter.SimpleFitter):
"""
A wrapper class for a spectral model. Includes internal functions to
generate multi-component models, annotations, integrals, and individual
components. The declaration can be complex, since you should name
individual variables, set limits on them, set the units the fit will be
performed in, and set the annotations to be used. Check out some
of the hyperfine codes (hcn, n2hp) for examples.
"""
def __init__(self, modelfunc, npars,
shortvarnames=("A","\\Delta x","\\sigma"),
fitunits=None,
centroid_par=None,
fwhm_func=None,
fwhm_pars=None,
integral_func=None,
use_lmfit=False, **kwargs):
"""
Spectral Model Initialization
Create a Spectral Model class for data fitting
Parameters
----------
modelfunc : function
the model function to be fitted. Should take an X-axis
(spectroscopic axis) as an input followed by input parameters.
Returns an array with the same shape as the input X-axis
npars : int
number of parameters required by the model
parnames : list (optional)
a list or tuple of the parameter names
parvalues : list (optional)
the initial guesses for the input parameters (defaults to ZEROS)
parlimits : list (optional)
the upper/lower limits for each variable (defaults to ZEROS)
parfixed : list (optional)
Can declare any variables to be fixed (defaults to ZEROS)
parerror : list (optional)
the estimated error on each parameter (strictly an output parameter; defaults to ZEROS)
partied : list (optional)
not the past tense of party. Can declare, via text, that
some parameters are tied to each other. Defaults to zeros like the
others, but it's not clear if that's a sensible default
fitunits : str (optional)
convert X-axis to these units before passing to model
parsteps : list (optional)
minimum step size for each parameter (defaults to ZEROS)
npeaks : list (optional)
default number of peaks to assume when fitting (can be overridden)
shortvarnames : list (optional)
TeX names of the variables to use when annotating
Returns
-------
A tuple containing (model best-fit parameters, the model, parameter
errors, chi^2 value)
"""
self.modelfunc = modelfunc
if self.__doc__ is None:
self.__doc__ = modelfunc.__doc__
elif modelfunc.__doc__ is not None:
self.__doc__ += modelfunc.__doc__
self.npars = npars
self.default_npars = npars
self.fitunits = fitunits
# this needs to be set once only
self.shortvarnames = shortvarnames
self.default_parinfo = None
self.default_parinfo, kwargs = self._make_parinfo(**kwargs)
self.parinfo = copy.copy(self.default_parinfo)
self.modelfunc_kwargs = kwargs
self.use_lmfit = use_lmfit
# default name of parameter that represents the profile centroid
self.centroid_par = centroid_par
# FWHM function and parameters
self.fwhm_func = fwhm_func
self.fwhm_pars = fwhm_pars
# analytic integral function
self.integral_func = integral_func
def __copy__(self):
# http://stackoverflow.com/questions/1500718/what-is-the-right-way-to-override-the-copy-deepcopy-operations-on-an-object-in-p
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
return result
def __call__(self, *args, **kwargs):
use_lmfit = kwargs.pop('use_lmfit') if 'use_lmfit' in kwargs else self.use_lmfit
if use_lmfit:
return self.lmfitter(*args,**kwargs)
return self.fitter(*args,**kwargs)
def make_parinfo(self, **kwargs):
return self._make_parinfo(**kwargs)[0]
def _make_parinfo(self, params=None, parnames=None, parvalues=None,
parlimits=None, parlimited=None, parfixed=None,
parerror=None, partied=None, fitunits=None,
parsteps=None, npeaks=1, parinfo=None, names=None,
values=None, limits=None, limited=None, fixed=None,
error=None, tied=None, steps=None, negamp=None,
limitedmin=None, limitedmax=None, minpars=None,
maxpars=None, vheight=False, debug=False, **kwargs):
"""
Generate a `ParinfoList` that matches the inputs
This code is complicated - it can take inputs in a variety of different
forms with different priority. It will return a `ParinfoList` (and
therefore must have values within parameter ranges)
"""
# for backwards compatibility - partied = tied, etc.
locals_dict = locals()
for varname in str.split("parnames,parvalues,parsteps,parlimits,parlimited,parfixed,parerror,partied",","):
shortvarname = varname.replace("par","")
if locals_dict.get(shortvarname) is not None and locals_dict.get(varname) is not None:
raise ValueError("Cannot specify both {0} and {1}".format(varname, shortvarname))
input_pardict = {k: locals_dict.get(k)
for k in str.split("parnames,parvalues,parsteps,parlimits,parlimited,parfixed,parerror,partied",",")}
_tip = {'par'+k: locals_dict.get(k)
for k in str.split("names,values,steps,limits,limited,fixed,error,tied",",")
if locals_dict.get(k)
}
input_pardict.update(_tip)
if params is not None and parvalues is not None:
raise ValueError("parvalues and params both specified; they're redundant so that's not allowed.")
elif params is not None and parvalues is None:
input_pardict['parvalues'] = params
log.debug("Parvalues = {0}, npeaks = {1}".format(input_pardict['parvalues'], npeaks))
# this is used too many damned times to keep referencing a dict.
parnames = input_pardict['parnames']
parlimited = input_pardict['parlimited']
parlimits = input_pardict['parlimits']
parvalues = input_pardict['parvalues']
if parnames is not None:
self.parnames = parnames
elif parnames is None and hasattr(self,'parnames') and self.parnames is not None:
parnames = self.parnames
elif self.default_parinfo is not None and parnames is None:
parnames = [p['parname'] for p in self.default_parinfo]
input_pardict['parnames'] = parnames
assert input_pardict['parnames'] is not None
if limitedmin is not None:
if limitedmax is not None:
parlimited = list(zip(limitedmin,limitedmax))
else:
parlimited = list(zip(limitedmin,(False,)*len(parnames)))
elif limitedmax is not None:
parlimited = list(zip((False,)*len(parnames),limitedmax))
elif self.default_parinfo is not None and parlimited is None:
parlimited = [p['limited'] for p in self.default_parinfo]
input_pardict['parlimited'] = parlimited
if minpars is not None:
if maxpars is not None:
parlimits = list(zip(minpars,maxpars))
else:
parlimits = list(zip(minpars,(False,)*len(parnames)))
elif maxpars is not None:
parlimits = list(zip((False,)*len(parnames),maxpars))
elif limits is not None:
parlimits = limits
elif self.default_parinfo is not None and parlimits is None:
parlimits = [p['limits'] for p in self.default_parinfo]
input_pardict['parlimits'] = parlimits
self.npeaks = int(npeaks)
# the height / parvalue popping needs to be done before the temp_pardict is set in order to make sure
# that the height guess isn't assigned to the amplitude
self.vheight = vheight
if ((vheight and len(self.parinfo) == self.default_npars and
len(parvalues) == self.default_npars + 1)):
# if the right number of parameters are passed, the first is the height
self.parinfo = [{'n':0, 'value':parvalues.pop(0), 'limits':(0,0),
'limited': (False,False), 'fixed':False, 'parname':'HEIGHT',
'error': 0, 'tied':""}]
elif vheight and len(self.parinfo) == self.default_npars and len(parvalues) == self.default_npars:
# if you're one par short, guess zero
self.parinfo = [ {'n':0, 'value': 0, 'limits':(0,0),
'limited': (False,False), 'fixed':False, 'parname':'HEIGHT',
'error': 0, 'tied':"" } ]
elif vheight and len(self.parinfo) == self.default_npars+1 and len(parvalues) == self.default_npars+1:
# the right numbers are passed *AND* there is already a height param
self.parinfo = [ {'n':0, 'value':parvalues.pop(0), 'limits':(0,0),
'limited': (False,False), 'fixed':False, 'parname':'HEIGHT',
'error': 0, 'tied':"" } ]
#heightparnum = (i for i,s in self.parinfo if 'HEIGHT' in s['parname'])
#for hpn in heightparnum:
# self.parinfo[hpn]['value'] = parvalues[0]
elif vheight:
raise ValueError('VHEIGHT is specified but a case was found that did not allow it to be included.')
else:
self.parinfo = []
log.debug("After VHEIGHT parse len(parinfo): %i vheight: %s" % (len(self.parinfo), vheight))
# this is a clever way to turn the parameter lists into a dict of lists
# clever = hard to read
temp_pardict = OrderedDict([(varname, np.zeros(self.npars*self.npeaks,
dtype='bool'))
if input_pardict.get(varname) is None else
(varname, list(input_pardict.get(varname)))
for varname in str.split("parnames,parvalues,parsteps,parlimits,parlimited,parfixed,parerror,partied",",")])
temp_pardict['parlimits'] = parlimits if parlimits is not None else [(0,0)] * (self.npars*self.npeaks)
temp_pardict['parlimited'] = parlimited if parlimited is not None else [(False,False)] * (self.npars*self.npeaks)
for k,v in temp_pardict.items():
if (self.npars*self.npeaks) / len(v) > 1:
n_components = ((self.npars*self.npeaks) / len(v))
if n_components != int(n_components):
raise ValueError("The number of parameter values is not a "
"multiple of the number of allowed "
"parameters.")
temp_pardict[k] = list(v) * int(n_components)
# generate the parinfo dict
# note that 'tied' must be a blank string (i.e. ""), not False, if it is not set
# parlimited, parfixed, and parlimits are all two-element items (tuples or lists)
self.parinfo += [ {'n':ii+self.npars*jj+vheight,
'value':float(temp_pardict['parvalues'][ii+self.npars*jj]),
'step':temp_pardict['parsteps'][ii+self.npars*jj],
'limits':temp_pardict['parlimits'][ii+self.npars*jj],
'limited':temp_pardict['parlimited'][ii+self.npars*jj],
'fixed':temp_pardict['parfixed'][ii+self.npars*jj],
'parname':temp_pardict['parnames'][ii].upper()+"%0i" % int(jj),
'error':float(temp_pardict['parerror'][ii+self.npars*jj]),
'tied':temp_pardict['partied'][ii+self.npars*jj] if temp_pardict['partied'][ii+self.npars*jj] else ""}
for jj in range(self.npeaks)
for ii in range(self.npars) ] # order matters!
log.debug("After Generation step len(parinfo): %i vheight: %s "
"parinfo: %s" % (len(self.parinfo), vheight, self.parinfo))
if debug > True:
import pdb; pdb.set_trace()
# special keyword to specify emission/absorption lines
if negamp is not None:
if negamp:
for p in self.parinfo:
if 'AMP' in p['parname']:
p['limited'] = (p['limited'][0], True)
p['limits'] = (p['limits'][0], 0)
else:
for p in self.parinfo:
if 'AMP' in p['parname']:
p['limited'] = (True, p['limited'][1])
p['limits'] = (0, p['limits'][1])
# This is effectively an override of all that junk above (3/11/2012)
# Much of it is probably unnecessary, but it was easier to do this than
# rewrite the above
self.parinfo = ParinfoList([Parinfo(p) for p in self.parinfo])
# New feature: scaleability
for par in self.parinfo:
if par.parname.lower().strip('0123456789') in ('amplitude','amp'):
par.scaleable = True
log.debug("Parinfo has been set: {0}".format(self.parinfo))
log.debug("kwargs {0} were passed.".format(kwargs))
assert self.parinfo != []
return self.parinfo, kwargs
def n_modelfunc(self, pars=None, debug=False, **kwargs):
"""
Simple wrapper to deal with N independent peaks for a given spectral model
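Examples
--------
Illustrative sketch only; ``mod`` is assumed to be a SpectralModel instance
whose parinfo has already been populated (e.g. by a previous fit):
>>> f = mod.n_modelfunc(mod.parinfo)
>>> synthetic = f(np.linspace(-50, 50, 1000))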
"""
if pars is None:
pars = self.parinfo
elif not isinstance(pars, ParinfoList):
try:
partemp = copy.copy(self.parinfo)
partemp._from_Parameters(pars)
pars = partemp
except AttributeError:
log.log(5, "Reading pars {0} as LMPar failed.".format(pars))
if debug > 1:
import pdb; pdb.set_trace()
if hasattr(pars,'values'):
# important to treat as Dictionary, since lmfit params & parinfo both have .items
parnames,parvals = list(zip(*list(pars.items())))
parnames = [p.lower() for p in parnames]
parvals = [p.value for p in parvals]
else:
parvals = list(pars)
log.debug("pars to n_modelfunc: {0}, parvals:{1}".format(pars, parvals))
def L(x):
v = np.zeros(len(x))
if self.vheight:
v += parvals[0]
# use len(pars) instead of self.npeaks because we want this to work
# independent of the current best fit
for jj in range(int((len(parvals)-self.vheight)/self.npars)):
lower_parind = jj*self.npars+self.vheight
upper_parind = (jj+1)*self.npars+self.vheight
v += self.modelfunc(x, *parvals[lower_parind:upper_parind], **kwargs)
return v
return L
def mpfitfun(self,x,y,err=None):
"""
Wrapper function to compute the fit residuals in an mpfit-friendly format
"""
if err is None:
def f(p,fjac=None):
residuals = (y-self.n_modelfunc(p, **self.modelfunc_kwargs)(x))
return [0,residuals]
else:
def f(p,fjac=None):
residuals = (y-self.n_modelfunc(p, **self.modelfunc_kwargs)(x))/err
return [0,residuals]
return f
def lmfitfun(self,x,y,err=None,debug=False):
"""
Wrapper function to compute the fit residuals in an lmfit-friendly format
"""
def f(p):
#pars = [par.value for par in p.values()]
kwargs = {}
kwargs.update(self.modelfunc_kwargs)
log.debug("Pars, kwarg keys: {0},{1}".format(p,list(kwargs.keys())))
if err is None:
return (y-self.n_modelfunc(p,**kwargs)(x))
else:
return (y-self.n_modelfunc(p,**kwargs)(x))/err
return f
def lmfitter(self, xax, data, err=None, parinfo=None, quiet=True, debug=False, **kwargs):
"""
Use lmfit instead of mpfit to do the fitting
Parameters
----------
xax : SpectroscopicAxis
The X-axis of the spectrum
data : ndarray
The data to fit
err : ndarray (optional)
The error on the data. If unspecified, will be uniform unity
parinfo : ParinfoList
The guesses, parameter limits, etc. See
`pyspeckit.spectrum.parinfo` for details
quiet : bool
If false, print out some messages about the fitting
"""
try:
import lmfit
except ImportError as e:
raise ImportError( "Could not import lmfit, try using mpfit instead." )
self.xax = xax # the 'stored' xax is just a link to the original
if hasattr(xax,'convert_to_unit') and self.fitunits is not None:
# some models will depend on the input units. For these, pass in an X-axis in those units
# (gaussian, voigt, lorentz profiles should not depend on units. Ammonia, formaldehyde,
# H-alpha, etc. should)
xax = copy.copy(xax)
xax.convert_to_unit(self.fitunits, quiet=quiet)
elif self.fitunits is not None:
raise TypeError("X axis does not have a convert method")
if np.any(np.isnan(data)) or np.any(np.isinf(data)):
err[np.isnan(data) + np.isinf(data)] = np.inf
data[np.isnan(data) + np.isinf(data)] = 0
if np.any(np.isnan(err)):
raise ValueError("One or more of the error values is NaN."
" This is not allowed. Errors can be infinite "
"(which is equivalent to giving zero weight to "
"a data point), but otherwise they must be positive "
"floats.")
elif np.any(err<0):
raise ValueError("At least one error value is negative, which is "
"not allowed as negative errors are not "
"meaningful in the optimization process.")
if parinfo is None:
parinfo, kwargs = self._make_parinfo(debug=debug, **kwargs)
log.debug("Parinfo created from _make_parinfo: {0}".format(parinfo))
LMParams = parinfo.as_Parameters()
log.debug("LMParams: "+"\n".join([repr(p) for p in list(LMParams.values())]))
log.debug("parinfo: {0}".format(parinfo))
minimizer = lmfit.minimize(self.lmfitfun(xax,np.array(data),err,debug=debug),LMParams,**kwargs)
if not quiet:
log.info("There were %i function evaluations" % (minimizer.nfev))
#modelpars = [p.value for p in parinfo.values()]
#modelerrs = [p.stderr for p in parinfo.values() if p.stderr is not None else 0]
self.LMParams = LMParams
self.parinfo._from_Parameters(LMParams)
log.debug("LMParams: {0}".format(LMParams))
log.debug("parinfo: {0}".format(parinfo))
self.mp = minimizer
self.mpp = self.parinfo.values
self.mpperr = self.parinfo.errors
self.mppnames = self.parinfo.names
modelkwargs = {}
modelkwargs.update(self.modelfunc_kwargs)
self.model = self.n_modelfunc(self.parinfo, **modelkwargs)(xax)
if hasattr(minimizer,'chisqr'):
chi2 = minimizer.chisqr
else:
try:
chi2 = (((data-self.model)/err)**2).sum()
except TypeError:
chi2 = ((data-self.model)**2).sum()
if np.isnan(chi2):
warn( "Warning: chi^2 is nan" )
if hasattr(self.mp,'ier') and self.mp.ier not in [1,2,3,4]:
log.warning("Fitter failed: %s, %s" % (self.mp.message, self.mp.lmdif_message))
return self.mpp,self.model,self.mpperr,chi2
def fitter(self, xax, data, err=None, quiet=True, veryverbose=False,
debug=False, parinfo=None, **kwargs):
"""
Run the fitter using mpfit.
kwargs will be passed to _make_parinfo and mpfit.
Parameters
----------
xax : SpectroscopicAxis
The X-axis of the spectrum
data : ndarray
The data to fit
err : ndarray (optional)
The error on the data. If unspecified, will be uniform unity
parinfo : ParinfoList
The guesses, parameter limits, etc. See
`pyspeckit.spectrum.parinfo` for details
quiet : bool
pass to mpfit. If False, will print out the parameter values for
each iteration of the fitter
veryverbose : bool
print out a variety of mpfit output parameters
debug : bool
raise an exception (rather than a warning) if chi^2 is nan
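Examples
--------
Illustrative call only (a sketch for a three-parameter model); ``xarr``,
``flux`` and ``noise`` are assumed to be user-supplied arrays:
>>> mpp, model, mpperr, chi2 = mod.fitter(xarr, flux, err=noise,
...                                       params=[1.0, 0.0, 1.0])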
"""
if parinfo is None:
parinfo, kwargs = self._make_parinfo(debug=debug, **kwargs)
else:
log.debug("Using user-specified parinfo dict")
# clean out disallowed kwargs (don't want to pass them to mpfit)
#throwaway, kwargs = self._make_parinfo(debug=debug, **kwargs)
self.xax = xax # the 'stored' xax is just a link to the original
if hasattr(xax,'as_unit') and self.fitunits is not None:
# some models will depend on the input units. For these, pass in an X-axis in those units
# (gaussian, voigt, lorentz profiles should not depend on units. Ammonia, formaldehyde,
# H-alpha, etc. should)
xax = copy.copy(xax)
# xax.convert_to_unit(self.fitunits, quiet=quiet)
xax = xax.as_unit(self.fitunits, quiet=quiet, **kwargs)
elif self.fitunits is not None:
raise TypeError("X axis does not have a convert method")
if np.any(np.isnan(data)) or np.any(np.isinf(data)):
err[np.isnan(data) + np.isinf(data)] = np.inf
data[np.isnan(data) + np.isinf(data)] = 0
if np.any(np.isnan(err)):
raise ValueError("One or more of the error values is NaN."
" This is not allowed. Errors can be infinite "
"(which is equivalent to giving zero weight to "
"a data point), but otherwise they must be positive "
"floats.")
elif np.any(err<0):
raise ValueError("At least one error value is negative, which is "
"not allowed as negative errors are not "
"meaningful in the optimization process.")
for p in parinfo: log.debug( p )
log.debug( "\n".join(["%s %i: tied: %s value: %s" % (p['parname'],p['n'],p['tied'],p['value']) for p in parinfo]) )
mp = mpfit(self.mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet,debug=debug,**kwargs)
mpp = mp.params
if mp.perror is not None: mpperr = mp.perror
else: mpperr = mpp*0
chi2 = mp.fnorm
if mp.status == 0:
if "parameters are not within PARINFO limits" in mp.errmsg:
log.warn( parinfo )
raise mpfitException(mp.errmsg)
for i,(p,e) in enumerate(zip(mpp,mpperr)):
self.parinfo[i]['value'] = p
self.parinfo[i]['error'] = e
if veryverbose:
log.info("Fit status: {0}".format(mp.status))
log.info("Fit error message: {0}".format(mp.errmsg))
log.info("Fit message: {0}".format(mpfit_messages[mp.status]))
for i,p in enumerate(mpp):
log.info("{0}: {1} +/- {2}".format(self.parinfo[i]['parname'],
p,mpperr[i]))
log.info("Chi2: {0} Reduced Chi2: {1} DOF:{2}".format(mp.fnorm,
mp.fnorm/(len(data)-len(mpp)),
len(data)-len(mpp)))
self.mp = mp
self.mpp = self.parinfo.values
self.mpperr = self.parinfo.errors
self.mppnames = self.parinfo.names
self.model = self.n_modelfunc(self.parinfo,**self.modelfunc_kwargs)(xax)
log.debug("Modelpars: {0}".format(self.mpp))
if np.isnan(chi2):
if debug:
raise ValueError("Error: chi^2 is nan")
else:
log.warn("Warning: chi^2 is nan")
return mpp,self.model,mpperr,chi2
def slope(self, xinp):
"""
Find the local slope of the model at location x
(x must be in xax's units)
"""
if hasattr(self, 'model'):
dm = np.diff(self.model)
# convert requested x to pixels
xpix = self.xax.x_to_pix(xinp)
dmx = np.average(dm[xpix-1:xpix+1])
if np.isfinite(dmx):
return dmx
else:
return 0
def annotations(self, shortvarnames=None, debug=False):
"""
Return a list of TeX-formatted labels
The values and errors are formatted so that only the significant digits
are displayed. Rounding is performed using the decimal package.
Parameters
----------
shortvarnames : list
A list of variable names (tex is allowed) to include in the
annotations. Defaults to self.shortvarnames
Examples
--------
>>> # Annotate a Gaussian
>>> sp.specfit.annotate(shortvarnames=['A','\\Delta x','\\sigma'])
"""
from decimal import Decimal # for formatting
svn = self.shortvarnames if shortvarnames is None else shortvarnames
# if pars need to be replicated....
if len(svn) < self.npeaks*self.npars:
svn = svn * self.npeaks
parvals = self.parinfo.values
parerrs = self.parinfo.errors
loop_list = [(parvals[ii+jj*self.npars+self.vheight],
parerrs[ii+jj*self.npars+self.vheight],
svn[ii+jj*self.npars],
self.parinfo.fixed[ii+jj*self.npars+self.vheight],
jj)
for jj in range(self.npeaks) for ii in range(self.npars)]
label_list = []
for (value, error, varname, fixed, varnumber) in loop_list:
log.debug(", ".join([str(x) for x in (value, error, varname, fixed, varnumber)]))
if fixed or error==0:
label = ("$%s(%i)$=%8s" % (varname,varnumber,
Decimal("%g" % value).quantize( Decimal("%0.6g" % (value)) )))
else:
label = ("$%s(%i)$=%8s $\\pm$ %8s" % (varname,varnumber,
Decimal("%g" % value).quantize( Decimal("%0.2g" % (min(np.abs([value,error])))) ),
Decimal("%g" % error).quantize(Decimal("%0.2g" % (error))),))
label_list.append(label)
labels = tuple(mpcb.flatten(label_list))
return labels
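# Illustrative sketch, not part of the original class: how the Decimal
# rounding above behaves for a hypothetical value/error pair. quantize()
# snaps the first Decimal to the exponent of the second, so formatting the
# error with "%0.2g" keeps roughly two significant digits of precision.
# >>> from decimal import Decimal
# >>> value, error = 3.14159, 0.0234
# >>> Decimal("%g" % value).quantize(Decimal("%0.2g" % min(abs(value), abs(error))))
# Decimal('3.142')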
def components(self, xarr, pars, **kwargs):
"""
Return a numpy ndarray of shape [npeaks x modelshape] of the
independent components of the fits
"""
modelcomponents = np.array(
[self.modelfunc(xarr,
*pars[i*self.npars:(i+1)*self.npars],
**dict(list(self.modelfunc_kwargs.items())+list(kwargs.items())))
for i in range(self.npeaks)])
if len(modelcomponents.shape) == 3:
newshape = [modelcomponents.shape[0]*modelcomponents.shape[1], modelcomponents.shape[2]]
modelcomponents = np.reshape(modelcomponents, newshape)
return modelcomponents
def integral(self, modelpars, dx=None, **kwargs):
"""
Extremely simple integrator:
IGNORES modelpars;
just sums self.model
"""
if not hasattr(self,'model'):
raise ValueError("Must fit (or compute) a model before computing"
" its integral.")
if dx is not None:
return (self.model*dx).sum()
else:
return self.model.sum()
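# Illustrative sketch, not part of the original class: integral() just sums
# the fitted model, optionally times dx, i.e. a rectangle-rule estimate.
# For a hypothetical model sampled on a grid with spacing dx=0.5:
# >>> model = np.array([0., 1., 2., 1., 0.])
# >>> (model * 0.5).sum()
# 2.0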
def analytic_integral(self, modelpars=None, npeaks=None, npars=None):
"""
Placeholder for analytic integrals; these must be defined for individual models
"""
if self.integral_func is None:
raise NotImplementedError("Analytic integrals must be implemented independently for each model type")
# all of these parameters are allowed to be overwritten
if modelpars is None:
modelpars = self.parinfo.values
if npeaks is None:
npeaks = self.npeaks
if npars is None:
npars = self.npars
return np.sum([
self.integral_func(modelpars[npars*ii:npars*(1+ii)])
for ii in range(npeaks)])
def component_integrals(self, xarr, dx=None):
"""
Compute the integrals of each component
"""
components = self.components(xarr, self.parinfo.values)
if dx is None:
dx = 1
integrals = [com.sum()*dx for com in components]
return integrals
def analytic_fwhm(self, parinfo=None):
"""
Return the FWHMa of the model components *if* a fwhm_func has been
defined
Done with incomprehensible list comprehensions instead of nested for
loops... readability sacrificed for speed and simplicity. This is
unpythonic.
"""
if self.fwhm_func is None and self.fwhm_pars is None:
raise TypeError("fwhm_func not implemented for model %s" % self.__name__)
if parinfo is None:
parinfo = self.parinfo
fwhm = [self.fwhm_func(
*[self.parinfo[str.upper(p+'%i' % n)] for p in self.fwhm_pars]
)
for n in range(self.npeaks)]
return fwhm
def analytic_centroids(self, centroidpar=None):
"""
Return the *analytic* centroids of the model components
Parameters
----------
centroidpar : None or string
The name of the parameter in the fit that represents the centroid
*some models have default centroid parameters - these will be used
if centroidpar is unspecified*
Returns
-------
List of the centroid values (even if there's only 1)
"""
if centroidpar is None:
centroidpar = self.centroid_par
centr = [par.value
for par in self.parinfo
if str.upper(centroidpar) in par.parname]
return centr
def computed_centroid(self, xarr=None):
"""
Return the *computed* centroid of the model
Parameters
----------
xarr : None or np.ndarray
The X coordinates of the model over which the centroid should be
computed. If unspecified, the centroid will be in pixel units
"""
if not hasattr(self, 'model'):
raise ValueError("Must fit (or compute) a model before measuring "
"its centroid")
if xarr is None:
xarr = np.arange(self.model.size)
centr = (self.model*xarr).sum() / self.model.sum()
return centr
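# Illustrative sketch, not part of the original class: the centroid above is
# the intensity-weighted mean coordinate. For a hypothetical model and axis:
# >>> model = np.array([0., 1., 2., 1., 0.])
# >>> xarr = np.arange(model.size)
# >>> (model * xarr).sum() / model.sum()
# 2.0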
def logp(self, xarr, data, error, pars=None):
"""
Return the log probability of the model. If the parameter is out of
range, return -inf
"""
if pars is None:
pars = self.parinfo
else:
parinfo = copy.copy(self.parinfo)
for value,parameter in zip(pars,parinfo):
try:
parameter.value = value
except ValueError:
return -np.inf
model = self.n_modelfunc(pars, **self.modelfunc_kwargs)(xarr)
difference = np.abs(data-model)
# prob = 1/(2*np.pi)**0.5/error * exp(-difference**2/(2.*error**2))
#logprob = np.log(1./(2.*np.pi)**0.5/error) * (-difference**2/(2.*error**2))
logprob = (-difference**2/(2.*error**2))
totallogprob = np.sum(logprob)
return totallogprob
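# Illustrative sketch, not part of the original class: logp() is a Gaussian
# log-likelihood with the constant normalisation term dropped, i.e.
# sum(-(data - model)**2 / (2 * error**2)). For hypothetical arrays:
# >>> data = np.array([1.0, 2.0]); model = np.array([1.5, 2.0])
# >>> error = np.array([0.5, 0.5])
# >>> np.sum(-(data - model)**2 / (2. * error**2))
# -0.5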
def get_emcee_sampler(self, xarr, data, error, **kwargs):
"""
Get an emcee walker for the data & model
Parameters
----------
xarr : pyspeckit.units.SpectroscopicAxis
data : np.ndarray
error : np.ndarray
Examples
--------
>>> import pyspeckit
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> emcee_sampler = sp.specfit.fitter.get_emcee_sampler(sp.xarr, sp.data, sp.error)
>>> p0 = sp.specfit.parinfo
>>> emcee_sampler.run_mcmc(p0,100)
"""
try:
import emcee
except ImportError:
return
def probfunc(pars):
return self.logp(xarr, data, error, pars=pars)
raise NotImplementedError("emcee's metropolis-hastings sampler is not implemented; use pymc")
sampler = emcee.MHSampler(self.npars*self.npeaks+self.vheight, probfunc, **kwargs)
return sampler
def get_emcee_ensemblesampler(self, xarr, data, error, nwalkers, **kwargs):
"""
Get an emcee walker ensemble for the data & model
Parameters
----------
data : np.ndarray
error : np.ndarray
nwalkers : int
Number of walkers to use
Examples
--------
>>> import pyspeckit
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> nwalkers = sp.specfit.fitter.npars * 2
>>> emcee_ensemble = sp.specfit.fitter.get_emcee_ensemblesampler(sp.xarr, sp.data, sp.error, nwalkers)
>>> p0 = np.array([sp.specfit.parinfo.values] * nwalkers)
>>> p0 *= np.random.randn(*p0.shape) / 10. + 1.0
>>> pos,logprob,state = emcee_ensemble.run_mcmc(p0,100)
"""
try:
import emcee
except ImportError:
return
def probfunc(pars):
return self.logp(xarr, data, error, pars=pars)
sampler = emcee.EnsembleSampler(nwalkers,
self.npars*self.npeaks+self.vheight,
probfunc, **kwargs)
return sampler
def get_pymc(self, xarr, data, error, use_fitted_values=False, inf=np.inf,
use_adaptive=False, return_dict=False, **kwargs):
"""
Create a pymc MCMC sampler. Defaults to 'uninformative' priors
Parameters
----------
data : np.ndarray
error : np.ndarray
use_fitted_values : bool
Each parameter with a measured error will have a prior defined by
the Normal distribution with sigma = par.error and mean = par.value
Examples
--------
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> MCuninformed = sp.specfit.fitter.get_pymc(sp.xarr, sp.data, sp.error)
>>> MCwithpriors = sp.specfit.fitter.get_pymc(sp.xarr, sp.data, sp.error, use_fitted_values=True)
>>> MCuninformed.sample(1000)
>>> MCuninformed.stats()['AMPLITUDE0']
>>> # WARNING: This will fail because width cannot be set <0, but it may randomly reach that...
>>> # How do you define a likelihood distribution with a lower limit?!
>>> MCwithpriors.sample(1000)
>>> MCwithpriors.stats()['AMPLITUDE0']
"""
old_errsettings = np.geterr()
try:
import pymc
finally:
# pymc breaks error settings
np.seterr(**old_errsettings)
#def lowerlimit_like(x,lolim):
# "lower limit (log likelihood - set very positive for unacceptable values)"
# return (x>=lolim) / 1e10
#def upperlimit_like(x,uplim):
# "upper limit"
# return (x<=uplim) / 1e10
#LoLim = pymc.distributions.stochastic_from_dist('lolim', logp=lowerlimit_like, dtype=np.float, mv=False)
#UpLim = pymc.distributions.stochastic_from_dist('uplim', logp=upperlimit_like, dtype=np.float, mv=False)
funcdict = {}
# very, very worrisome: pymc changes the values of parinfo
parcopy = copy.deepcopy(self.parinfo)
for par in parcopy:
lolim = par.limits[0] if par.limited[0] else -inf
uplim = par.limits[1] if par.limited[1] else inf
if par.fixed:
funcdict[par.parname] = pymc.distributions.Uniform(par.parname, par.value, par.value, value=par.value)
elif use_fitted_values:
if par.error > 0:
if any(par.limited):
try:
funcdict[par.parname] = pymc.distributions.TruncatedNormal(par.parname, par.value, 1./par.error**2, lolim, uplim)
except AttributeError:
# old versions used this?
funcdict[par.parname] = pymc.distributions.TruncNorm(par.parname, par.value, 1./par.error**2, lolim, uplim)
else:
funcdict[par.parname] = pymc.distributions.Normal(par.parname, par.value, 1./par.error**2)
else:
if any(par.limited):
funcdict[par.parname] = pymc.distributions.Uniform(par.parname, lolim, uplim, value=par.value)
else:
funcdict[par.parname] = pymc.distributions.Uninformative(par.parname, value=par.value)
elif any(par.limited):
lolim = par.limits[0] if par.limited[0] else -1e10
uplim = par.limits[1] if par.limited[1] else 1e10
funcdict[par.parname] = pymc.distributions.Uniform(par.parname, lower=lolim, upper=uplim, value=par.value)
else:
funcdict[par.parname] = pymc.distributions.Uninformative(par.parname, value=par.value)
d = dict(funcdict)
def modelfunc(xarr, pars=parcopy, **kwargs):
for k,v in kwargs.items():
if k in list(pars.keys()):
pars[k].value = v
return self.n_modelfunc(pars, **self.modelfunc_kwargs)(xarr)
funcdict['xarr'] = xarr
funcdet=pymc.Deterministic(name='f',eval=modelfunc,parents=funcdict,doc="The model function")
d['f'] = funcdet
datamodel = pymc.distributions.Normal('data', mu=funcdet,
tau=1/np.asarray(error)**2,
observed=True,
value=np.asarray(data))
d['data']=datamodel
if return_dict:
return d
mc = pymc.MCMC(d)
if use_adaptive:
mc.use_step_method(pymc.AdaptiveMetropolis,[d[p] for p in self.parinfo.names])
return mc
class AstropyModel(SpectralModel):
def __init__(self, model, shortvarnames=None, **kwargs):
"""
Override the SpectralModel initialization
"""
if self.__doc__ is not None: # extend the class docstring, if one exists
self.__doc__ += SpectralModel.__doc__
if shortvarnames is None:
shortvarnames = model.param_names
super(AstropyModel,self).__init__(model, len(model.parameters),
shortvarnames=shortvarnames,
model=model,
**kwargs)
self.mp = None
self.vheight = False
self.npeaks = 1
def _make_parinfo(self, model=None):
self.parinfo = ParinfoList([
Parinfo(parname=name,value=value)
for name,value in zip(model.param_names,model.parameters)])
return self.parinfo, {}
def _parse_parinfo(self, parinfo):
"""
Parse a ParinfoList into astropy.models parameters
"""
if len(parinfo) > self.npars:
if len(parinfo) % self.npars != 0:
raise ValueError("Need to have an integer number of models")
else:
self.modelfunc.param_names = parinfo.names
self.modelfunc.parameters = parinfo.values
else:
self.modelfunc.param_names = parinfo.names
self.modelfunc.parameters = parinfo.values
def fitter(self, xax, data, err=None, quiet=True, veryverbose=False,
debug=False, parinfo=None, params=None, npeaks=None, **kwargs):
import astropy.models as models
if npeaks is not None and npeaks > 1:
raise NotImplementedError("Astropy models cannot be used to fit multiple peaks yet")
if parinfo is not None:
self._parse_parinfo(parinfo)
if params is not None:
self.modelfunc.parameters = params
self.astropy_fitter = models.fitting.NonLinearLSQFitter(self.modelfunc)
if err is None:
self.astropy_fitter(xax, data, **kwargs)
else:
self.astropy_fitter(xax, data, weights=1./err**2, **kwargs)
mpp = self.astropy_fitter.fitpars
cov = self.astropy_fitter.covar
if cov is None:
mpperr = np.zeros(len(mpp))
else:
mpperr = cov.diagonal()
self.model = self.astropy_fitter.model(xax)
if err is None:
chi2 = ((data-self.model)**2).sum()
else:
chi2 = ((data-self.model)**2/err**2).sum()
# update object parameters
self.modelfunc.parameters = mpp
self._make_parinfo(self.modelfunc)
return mpp,self.model,mpperr,chi2
def n_modelfunc(self, pars=None, debug=False, **kwargs):
"""
Only deals with single-peak functions
"""
try:
self._parse_parinfo(pars)
except AttributeError:
self.modelfunc.parameters = pars
return self.modelfunc
|
mikelum/pyspeckit
|
pyspeckit/spectrum/models/model.py
|
Python
|
mit
| 43,935
|
[
"Gaussian"
] |
35b90e7c00ba35130f5f3db4bc1f22732ff8584b75499481713d724ba98822fd
|
from ase.data.molecules import molecule
from ase.parallel import paropen
from gpaw import GPAW
from gpaw.utilities.tools import split_formula
from gpaw.test import equal
cell = [10.,10.,10.]
data = paropen('data.txt', 'w')
##Reference from J. Chem. Phys. Vol 120 No. 15, 15 April 2004, page 6898
tpss_de = {
'Li2': 22.5,
}
tpss_old = {
'Li2': 22.7,
}
exp_bonds_dE = {
'Li2': (2.673,24.4),
}
niters_ref = {'Li2': 21, 'Li': 14}
niter_tolerance = 0
systems = ['Li2']
# Add atoms
for formula in systems:
temp = split_formula(formula)
for atom in temp:
if atom not in systems:
systems.append(atom)
energies = {}
niters = {}
# Calculate energies
for formula in systems:
loa = molecule(formula)
loa.set_cell(cell)
loa.center()
calc = GPAW(h=0.3,
nbands=-2,
xc='PBE',
#fixmom=True,
txt=formula + '.txt')
if len(loa) == 1:
calc.set(hund=True)
else:
pos = loa.get_positions()
pos[1,:] = pos[0,:] + [0.0, 0.0, exp_bonds_dE[formula][0]]
loa.set_positions(pos)
loa.center()
loa.set_calculator(calc)
try:
energy = loa.get_potential_energy()
niters[formula] = calc.get_number_of_iterations()
diff = calc.get_xc_difference('TPSS')
energies[formula] = (energy, energy + diff)
except:
raise#print >> data, formula, 'Error'
else:
print >> data, formula, energy, energy + diff
data.flush()
#calculate atomization energies
file = paropen('tpss.txt', 'w')
print >> file, 'formula\tGPAW\tRef\tGPAW-Ref\tGPAW-exp'
mae_ref, mae_exp, mae_pbe, count = 0.0, 0.0, 0.0, 0
for formula in tpss_de.keys():
try:
atoms_formula = split_formula(formula)
de_tpss = -1.0 * energies[formula][1]
de_pbe = -1.0 * energies[formula][0]
for atom_formula in atoms_formula:
de_tpss += energies[atom_formula][1]
de_pbe += energies[atom_formula][0]
except:
raise#print >>file, formula, 'Error'
else:
de_tpss *= 627.5/27.211
de_pbe *= 627.5/27.211
mae_ref += abs(de_tpss-tpss_de[formula])
mae_exp += abs(de_tpss-exp_bonds_dE[formula][1])
mae_pbe += abs(de_pbe-exp_bonds_dE[formula][1])
count += 1
out = "%s\t%.1f\t%.1f\t%.1f\t%.1f kcal/mol"%(formula,de_tpss,tpss_de[formula],
de_tpss-tpss_de[formula],de_tpss-exp_bonds_dE[formula][1])
print >>file, out
file.flush()
#comparison to gpaw revision 5450 version value in kcal/mol (note the grid:0.3 Ang)
equal(de_tpss, tpss_old[formula], 0.1)
equal(niters[formula], niters_ref[formula], niter_tolerance)
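# Illustrative note, not part of the original test: the factor 627.5/27.211
# used above converts the GPAW energy differences from eV to kcal/mol, since
# 1 Hartree is roughly 627.5 kcal/mol and roughly 27.211 eV.
ev_to_kcal_per_mol = 627.5 / 27.211  # ~23.06 kcal/mol per eV (unused; for clarity only)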
|
qsnake/gpaw
|
gpaw/test/tpss.py
|
Python
|
gpl-3.0
| 2,734
|
[
"ASE",
"GPAW"
] |
567636927c4c3d7c5b4c2ca2418715bd79415aebb370e02fa1f84e27d86c299e
|
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .tags import TagPatterns
class Stat(object):
def __init__(self, name):
self.name = name
self.passed = 0
self.failed = 0
def get_attributes(self, include_label=False, exclude_empty=False,
values_as_strings=False):
attrs = {'pass': self.passed, 'fail': self.failed}
attrs.update(self._get_custom_attrs())
if include_label:
attrs['label'] = self.name
if exclude_empty:
attrs = dict((k, v) for k, v in attrs.items() if v != '')
if values_as_strings:
attrs = dict((k, unicode(v)) for k, v in attrs.items())
return attrs
def _get_custom_attrs(self):
return {}
@property
def total(self):
return self.passed + self.failed
def add_test(self, test):
if test.passed:
self.passed += 1
else:
self.failed += 1
def add_stat(self, other):
self.passed += other.passed
self.failed += other.failed
def __cmp__(self, other):
return cmp(self.name, other.name)
def __nonzero__(self):
return not self.failed
def visit(self, visitor):
visitor.visit_stat(self)
class TotalStat(Stat):
type = 'total'
class SuiteStat(Stat):
type = 'suite'
def __init__(self, suite):
Stat.__init__(self, suite.longname)
self.id = suite.id
self._name = suite.name
def _get_custom_attrs(self):
return {'id': self.id, 'name': self._name}
class TagStat(Stat):
type = 'tag'
def __init__(self, name, doc='', links=None, critical=False,
non_critical=False, combined=''):
Stat.__init__(self, name)
self.doc = doc
self.links = links or []
self.critical = critical
self.non_critical = non_critical
self.combined = combined
@property
def info(self):
if self.critical:
return 'critical'
if self.non_critical:
return 'non-critical'
if self.combined:
return 'combined'
return ''
def _get_custom_attrs(self):
return {'doc': self.doc, 'links': self._get_links_as_string(),
'info': self.info, 'combined': self.combined}
def _get_links_as_string(self):
return ':::'.join('%s:%s' % (title, url) for url, title in self.links)
def __cmp__(self, other):
return cmp(other.critical, self.critical) \
or cmp(other.non_critical, self.non_critical) \
or cmp(bool(other.combined), bool(self.combined)) \
or cmp(self.name, other.name)
class CombinedTagStat(TagStat):
def __init__(self, pattern, name=None, doc='', links=None):
TagStat.__init__(self, name or pattern, doc, links, combined=pattern)
self._matcher = TagPatterns(pattern)
def match(self, tags):
return self._matcher.match(tags)
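# Illustrative sketch, not part of the original module: how a Stat object
# accumulates results. `FakeTest` is a hypothetical stand-in for a test
# result object exposing a boolean `passed` attribute.
# >>> stat = Stat('Example')
# >>> class FakeTest(object):
# ...     passed = True
# >>> stat.add_test(FakeTest())
# >>> stat.total, stat.passed, stat.failed
# (1, 1, 0)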
|
Senseg/robotframework
|
src/robot/model/stats.py
|
Python
|
apache-2.0
| 3,540
|
[
"VisIt"
] |
7012fee869ee357c1d6039fa272e0623722efab1fc51570a7e40789325d1ce16
|
########################################################################
# $HeadURL$
########################################################################
""" DIRAC FileCatalog component representing a directory tree with
enumerated paths
"""
__RCSID__ = "$Id$"
import os
from types import ListType, StringTypes
from DIRAC import S_OK, S_ERROR
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectoryTreeBase import DirectoryTreeBase
MAX_LEVELS = 15
class DirectoryLevelTree(DirectoryTreeBase):
""" Class managing Directory Tree as a simple self-linked structure
with full directory path stored in each node
"""
def __init__(self,database=None):
DirectoryTreeBase.__init__(self,database)
self.treeTable = 'FC_DirectoryLevelTree'
def getTreeType(self):
return 'Directory'
def findDir(self,path,connection=False):
""" Find directory ID for the given path
"""
dpath = os.path.normpath( path )
req = "SELECT DirID,Level from FC_DirectoryLevelTree WHERE DirName='%s'" % dpath
result = self.db._query(req,connection)
if not result['OK']:
return result
if not result['Value']:
return S_OK('')
res = S_OK(result['Value'][0][0])
res['Level'] = result['Value'][0][1]
return res
def findDirs( self, paths, connection=False ):
""" Find DirIDs for the given path list
"""
dpaths = ','.join( [ "'"+os.path.normpath( path )+"'" for path in paths ] )
req = "SELECT DirName,DirID from FC_DirectoryLevelTree WHERE DirName in (%s)" % dpaths
result = self.db._query(req,connection)
if not result['OK']:
return result
dirDict = {}
for dirName, dirID in result['Value']:
dirDict[dirName] = dirID
return S_OK( dirDict )
def removeDir(self,path):
""" Remove directory
"""
result = self.findDir(path)
if not result['OK']:
return result
if not result['Value']:
res = S_OK()
res["DirID"] = 0
return res
dirID = result['Value']
req = "DELETE FROM FC_DirectoryLevelTree WHERE DirID=%d" % dirID
result = self.db._update(req)
result['DirID'] = dirID
return result
def __getNumericPath(self,dirID,connection=False):
""" Get the enumerated path of the given directory
"""
epathString = ','.join( [ 'LPATH%d' % (i+1) for i in range( MAX_LEVELS ) ] )
req = 'SELECT LEVEL,%s FROM FC_DirectoryLevelTree WHERE DirID=%d' % (epathString,dirID)
result = self.db._query(req,connection)
if not result['OK']:
return result
if not result['Value']:
return S_OK([])
row = result['Value'][0]
level = row[0]
epathList = []
for i in range(level):
epathList.append(row[i+1])
result = S_OK(epathList)
result['Level'] = level
return result
def makeDir(self,path):
""" Create a new directory entry
"""
result = self.findDir(path)
if not result['OK']:
return result
dirID = result['Value']
if dirID:
result = S_OK(dirID)
result['NewDirectory'] = False
return result
dpath = path
if path == '/':
level = 0
elements = []
parentDirID = 0
else:
if path[0] == "/":
dpath = path[1:]
elements = dpath.split('/')
level = len(elements)
if level > MAX_LEVELS:
return S_ERROR('Too many directory levels: %d' % level)
result = self.getParent(path)
if not result['OK']:
return result
parentDirID = result['Value']
epathList = []
if parentDirID:
result = self.__getNumericPath(parentDirID)
if not result['OK']:
return result
epathList = result['Value']
names = ['DirName','Level','Parent']
values = [path,level,parentDirID]
if path != '/':
for i in range(1,level,1):
names.append('LPATH%d' % i)
values.append(epathList[i-1])
result = self.db._getConnection()
conn = result['Value']
#result = self.db._query("LOCK TABLES FC_DirectoryLevelTree WRITE; ",conn)
result = self.db._insert('FC_DirectoryLevelTree',names,values,conn)
if not result['OK']:
#resUnlock = self.db._query("UNLOCK TABLES;",conn)
if result['Message'].find('Duplicate') != -1:
#The directory is already added
resFind = self.findDir(path)
if not resFind['OK']:
return resFind
dirID = resFind['Value']
result = S_OK(dirID)
result['NewDirectory'] = False
return result
else:
return result
dirID = result['lastRowId']
# Update the path number
if parentDirID:
# lPath = "LPATH%d" % (level)
# req = " SELECT @tmpvar:=max(%s)+1 FROM FC_DirectoryLevelTree WHERE Parent=%d; " % (lPath,parentDirID)
# resultLock = self.db._query("LOCK TABLES FC_DirectoryLevelTree WRITE; ",conn)
# result = self.db._query(req,conn)
# req = "UPDATE FC_DirectoryLevelTree SET %s=@tmpvar WHERE DirID=%d; " % (lPath,dirID)
# result = self.db._update(req,conn)
# result = self.db._query("UNLOCK TABLES;",conn)
lPath = "LPATH%d" % (level)
req = " SELECT @tmpvar:=max(%s)+1 FROM FC_DirectoryLevelTree WHERE Parent=%d FOR UPDATE; " % ( lPath, parentDirID )
resultLock = self.db._query( "START TRANSACTION; ", conn )
result = self.db._query(req,conn)
req = "UPDATE FC_DirectoryLevelTree SET %s=@tmpvar WHERE DirID=%d; " % (lPath,dirID)
result = self.db._update(req,conn)
result = self.db._query( "COMMIT;", conn )
if not result['OK']:
return result
else:
result = self.db._query( "ROLLBACK;", conn )
result = S_OK(dirID)
result['NewDirectory'] = True
return result
def existsDir(self,path):
""" Check the existence of a directory at the specified path
"""
result = self.findDir(path)
if not result['OK']:
return result
if not result['Value']:
return S_OK({"Exists":False})
else:
return S_OK({"Exists":True,"DirID":result['Value']})
def getParent(self,path):
""" Get the parent ID of the given directory
"""
parent_dir = os.path.dirname(path)
return self.findDir(parent_dir)
def getParentID(self,dirPathOrID):
""" Get the ID of the parent of a directory specified by ID
"""
dirID = dirPathOrID
if type(dirPathOrID) in StringTypes:
result = self.findDir(dirPathOrID)
if not result['OK']:
return result
dirID = result['Value']
if dirID == 0:
return S_ERROR('Root directory ID given')
req = "SELECT Parent FROM FC_DirectoryLevelTree WHERE DirID=%d" % dirID
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('No parent found')
return S_OK(result['Value'][0][0])
def getDirectoryPath(self,dirID):
""" Get directory name by directory ID
"""
req = "SELECT DirName FROM FC_DirectoryLevelTree WHERE DirID=%d" % int(dirID)
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Directory with id %d not found' % int(dirID) )
return S_OK(result['Value'][0][0])
def getDirectoryPaths(self,dirIDList):
""" Get directory name by directory ID list
"""
dirs = dirIDList
if type(dirIDList) != ListType:
dirs = [dirIDList]
if not dirs:
return S_OK( {} )
dirListString = ','.join( [ str( d ) for d in dirs ] )
req = "SELECT DirID,DirName FROM FC_DirectoryLevelTree WHERE DirID in ( %s )" % dirListString
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Directories not found: %s' % dirListString )
resultDict = {}
for row in result['Value']:
resultDict[int(row[0])] = row[1]
return S_OK(resultDict)
def getDirectoryName(self,dirID):
""" Get directory name by directory ID
"""
result = self.getDirectoryPath(dirID)
if not result['OK']:
return result
return S_OK(os.path.basename(result['Value']))
def getPathIDs(self,path):
""" Get IDs of all the directories in the parent hierarchy for a directory
specified by its path
"""
elements = path.split('/')
pelements = []
dPath = ''
for el in elements[1:]:
dPath += '/'+el
pelements.append(dPath)
pelements.append( '/' )
pathString = [ "'"+p+"'" for p in pelements ]
req = "SELECT DirID FROM FC_DirectoryLevelTree WHERE DirName in (%s) ORDER BY DirID" % ','.join(pathString)
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Directory %s not found' % path)
return S_OK([ x[0] for x in result['Value'] ])
def getPathIDsByID_old(self,dirID):
""" Get IDs of all the directories in the parent hierarchy for a directory
specified by its ID
"""
# This method should rather be implemented using enumerated paths
result = self.getDirectoryPath(dirID)
if not result['OK']:
return result
dPath = result['Value']
return self.getPathIDs(dPath)
def getPathIDsByID(self,dirID):
""" Get IDs of all the directories in the parent hierarchy for a directory
specified by its ID
"""
result = self.__getNumericPath( dirID )
if not result['OK']:
return result
level = result['Level']
if level == 0:
return S_OK( [dirID] )
lpaths = result['Value']
lpathSelects = []
for l in range( level ):
sel = ' AND '.join( ["Level=%d" % l] + [ 'LPATH%d=%d' % (ll+1,lpaths[ll]) for ll in range( l ) ] )
lpathSelects.append( sel )
selection = '(' + ') OR ('.join( lpathSelects ) + ')'
req = "SELECT Level,DirID from FC_DirectoryLevelTree WHERE %s ORDER BY Level" % selection
result = self.db._query( req )
if not result['OK']:
return result
if not result['Value']:
return S_ERROR( 'No result for the path of Directory with ID %d' % dirID )
return S_OK([ x[1] for x in result['Value'] ] + [dirID] )
def getChildren(self,path,connection=False):
""" Get child directory IDs for the given directory
"""
if type(path) in StringTypes:
result = self.findDir(path,connection)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Directory does not exist: %s' % path )
dirID = result['Value']
else:
dirID = path
req = "SELECT DirID FROM FC_DirectoryLevelTree WHERE Parent=%d" % dirID
result = self.db._query(req,connection)
if not result['OK']:
return result
if not result['Value']:
return S_OK([])
return S_OK([ x[0] for x in result['Value'] ])
def getSubdirectoriesByID(self,dirID,requestString=False,includeParent=False):
""" Get all the subdirectories of the given directory at a given level
"""
req = "SELECT Level FROM FC_DirectoryLevelTree WHERE DirID=%d" % dirID
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Directory %d not found' % dirID)
level = result['Value'][0][0]
sPaths = []
if requestString:
req = "SELECT DirID FROM FC_DirectoryLevelTree"
else:
req = "SELECT Level,DirID FROM FC_DirectoryLevelTree"
if level > 0:
req += " AS F1"
for i in range(1,level+1):
sPaths.append('LPATH%d' % i)
pathString = ','.join(sPaths)
req += " JOIN (SELECT %s FROM FC_DirectoryLevelTree WHERE DirID=%d) AS F2 ON " % (pathString,dirID)
sPaths = []
for i in range(1,level+1):
sPaths.append('F1.LPATH%d=F2.LPATH%d' % (i,i))
pString = ' AND '.join(sPaths)
if includeParent:
req += "%s AND F1.Level >= %d" % (pString,level)
else:
req += "%s AND F1.Level > %d" % (pString,level)
if requestString:
return S_OK(req)
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_OK({})
resDict = {}
for row in result['Value']:
resDict[row[1]] = row[0]
return S_OK(resDict)
def countSubdirectories(self, dirId, includeParent = True):
result = self.getSubdirectoriesByID( dirId, requestString = True, includeParent = includeParent )
if not result['OK']:
return result
reqDir = result['Value'].replace( 'SELECT DirID FROM', 'SELECT count(*) FROM' )
result = self.db._query( reqDir )
if not result['OK']:
return result
return S_OK( result['Value'][0][0] )
def getAllSubdirectoriesByID(self,dirList):
""" Get IDs of all the subdirectories of directories in a given list
"""
dirs = dirList
if type(dirList) != ListType:
dirs = [dirList]
resultList = []
parentList = dirs
while parentList:
subResult = []
dirListString = ','.join( [ str( d ) for d in parentList ] )
req = 'SELECT DirID from FC_DirectoryLevelTree WHERE Parent in ( %s )' % dirListString
result = self.db._query(req)
if not result['OK']:
return result
for row in result['Value']:
subResult.append(row[0])
if subResult:
resultList += subResult
parentList = subResult
return S_OK(resultList)
def getSubdirectories(self,path):
""" Get subdirectories of the given directory
"""
result = self.findDir(path)
if not result['OK']:
return result
if not result['Value']:
return S_OK({})
dirID = result['Value']
result = self.getSubdirectoriesByID(dirID)
return result
def recoverOrphanDirectories( self, credDict ):
""" Recover orphan directories
"""
# Find out orphan directories
treeTable = 'FC_DirectoryLevelTree'
req = "SELECT DirID,Parent,Level FROM %s WHERE Parent NOT IN ( SELECT DirID from %s )" % (treeTable,treeTable)
result = self.db._query( req )
if not result['OK']:
return result
parentDict = {}
for dirID,parentID,level in result['Value']:
result = self.getDirectoryPath( dirID )
if not result['OK']:
continue
dirPath = result['Value']
parentPath = os.path.dirname( dirPath )
if not dirPath == '/':
parentDict.setdefault( parentPath, {} )
parentDict[parentPath].setdefault( 'DirList', [] )
parentDict[parentPath]['DirList'].append( dirID )
parentDict[parentPath]['OldParentID'] = parentID
for parentPath, dirDict in parentDict.items():
dirIDList = dirDict['DirList']
oldParentID = dirDict['OldParentID']
result = self.findDir( parentPath )
if not result['OK']:
continue
if result['Value']:
# The parent directory was recreated already
parentID = result['Value']
else:
# The parent directory was lost
result = self.makeDirectories( parentPath, credDict )
if not result['OK']:
continue
parentID = result['Value']
# We have created a new directory but let's keep the old ID
req = "UPDATE FC_DirectoryLevelTree SET DirID=%s WHERE DirID=%s" % ( oldParentID, parentID )
result = self.db._update( req )
if not result['OK']:
continue
req = "UPDATE FC_DirectoryInfo SET DirID=%s WHERE DirID=%s" % ( oldParentID, parentID )
result = self.db._update( req )
parentID = oldParentID
# We also have to change the ownership of the new directory to the most
# likely one, which is the owner of the containing directory
containerPath = os.path.dirname( parentPath )
result = self.getDirectoryParameters( containerPath )
if result['OK']:
conDict = result['Value']
uid = conDict['UID']
gid = conDict['GID']
result = self._setDirectoryUid(parentID,uid)
result = self._setDirectoryGid(parentID,gid)
dirString = ','.join( [ str(dirID) for dirID in dirIDList ] )
req = "UPDATE FC_DirectoryLevelTree SET Parent=%s WHERE DirID IN (%s)" % ( parentID, dirString )
result = self.db._update( req )
if not result['OK']:
continue
connection = self._getConnection()
result = self.db._query("LOCK TABLES FC_DirectoryLevelTree WRITE", connection )
if not result['OK']:
resUnlock = self.db._query("UNLOCK TABLES", connection )
return result
result = self.__rebuildLevelIndexes( parentID, connection)
resUnlock = self.db._query("UNLOCK TABLES", connection )
return S_OK()
def _getConnection( self, connection=False ):
if connection:
return connection
res = self.db._getConnection()
if res['OK']:
return res['Value']
return connection
def __rebuildLevelIndexes( self, parentID, connection=False ):
""" Rebuild level indexes for all the subdirectories
"""
result = self.__getNumericPath( parentID, connection )
if not result['OK']:
return result
parentIndexList = result['Value']
parentLevel = result['Level']
result = self.getChildren( parentID, connection )
if not result['OK']:
return result
subIDList = result['Value']
indexList = list( parentIndexList )
indexList.append( 0 )
for dirID in subIDList:
indexList[-1] += 1
lpaths = [ 'LPATH%d=%d' % (i+1,indexList[i]) for i in range(parentLevel+1) ]
lpathString = 'SET '+','.join( lpaths )
req = "UPDATE FC_DirectoryLevelTree %s WHERE DirID=%s" % ( lpathString, dirID )
result = self.db._update( req, connection )
if not result['OK']:
return result
result = self.__rebuildLevelIndexes( dirID, connection )
return S_OK()
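# Illustrative sketch, not part of the original class: the enumerated-path
# scheme stores, for a directory at Level N, its child index at every
# ancestor level in the LPATH1..LPATHN columns, so subtree lookups become
# equality tests on those columns. For a hypothetical directory with
# DirID=42 at Level 2, getSubdirectoriesByID builds a query of the form:
#
# SELECT Level,DirID FROM FC_DirectoryLevelTree AS F1
# JOIN (SELECT LPATH1,LPATH2 FROM FC_DirectoryLevelTree WHERE DirID=42) AS F2
# ON F1.LPATH1=F2.LPATH1 AND F1.LPATH2=F2.LPATH2 AND F1.Level > 2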
|
marcelovilaca/DIRAC
|
DataManagementSystem/DB/FileCatalogComponents/DirectoryLevelTree.py
|
Python
|
gpl-3.0
| 18,112
|
[
"DIRAC"
] |
1c74c04b459db143adf63c111909323cb5dee66ec514689a7c7ffd1a5ff14c8f
|
"""
===============================
Ordinary Least Squares with SGD
===============================
Simple Ordinary Least Squares example with stochastic
gradient descent: we draw the linear least-squares
solution for a random set of points in the plane.
"""
print __doc__
import pylab as pl
from sklearn.linear_model import SGDRegressor
from sklearn.datasets.samples_generator import make_regression
# this is our test set; it's just a straight line with some
# Gaussian noise
X, Y = make_regression(n_samples=100, n_features=1, n_informative=1,
random_state=0, noise=35)
# run the regressor
clf = SGDRegressor(alpha=0.1, n_iter=20)
clf.fit(X, Y)
# and plot the result
pl.scatter(X, Y, color='black')
pl.plot(X, clf.predict(X), color='blue', linewidth=3)
pl.show()
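# Illustrative addition, not in the original example: the fitted slope and
# intercept are available on the estimator after fit().
print "fitted slope:", clf.coef_, "intercept:", clf.intercept_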
|
mrshu/scikit-learn
|
examples/linear_model/plot_sgd_ols.py
|
Python
|
bsd-3-clause
| 796
|
[
"Gaussian"
] |
d08a0442e3a113d14bdbc24dfdd476146dcd44d2a430db60c44a25966ad4d46c
|
tests=[
("python","UnitTestTrainer.py",{}),
]
longTests=[
]
if __name__=='__main__':
import sys
from rdkit import TestRunner
failed,tests = TestRunner.RunScript('test_list.py',0,1)
sys.exit(len(failed))
|
rdkit/rdkit-orig
|
rdkit/ML/Neural/test_list.py
|
Python
|
bsd-3-clause
| 221
|
[
"RDKit"
] |
fdb65de0274467e3a53852cda5dc41ac9b7a51c99eb93bd2b6a7044d6b5346f1
|
#!/usr/bin/env python
"""
Extracts data from reference files or calculates FF data.
Takes a sequence of keywords corresponding to various
datatypes (ex. mb = MacroModel bond lengths) followed by filenames,
and extracts that particular data type from the file.
Note that the order of filenames IS IMPORTANT!
Used to manage calls to MacroModel but that is now done in the
Mae class inside filetypes. I'm still debating if that should be
there or here. Will see how this framework translates into
Amber and then decide.
"""
from __future__ import absolute_import
from __future__ import division
import argparse
import logging
import logging.config
import numpy as np
import os
import sys
# I don't really want to import all of chain if possible. I only want
# chain.from_iterable.
# chain.from_iterable flattens a list of lists similar to:
# [child for parent in grandparent for child in parent]
# However, I think chain.from_iterable works on any number of nested lists.
from itertools import chain
from textwrap import TextWrapper
import constants as co
import compare
import datatypes
import filetypes
import parameters
logger = logging.getLogger(__name__)
# Commands where we need to load the force field.
COM_LOAD_FF = ['ma', 'mb', 'mt',
'ja', 'jb', 'jt']
# Commands related to Gaussian.
COM_GAUSSIAN = ['gaa','gaao','gab','gabo','gat','gato',
'gta','gtb','gtt','ge','ge1', 'gea', 'geo','ge1o', 'geao',
'gh', 'geigz']
# Commands related to Jaguar (Schrodinger).
COM_JAGUAR = ['jq', 'jqh', 'jqa',
'je', 'jeo', 'jea', 'jeao',
'jh', 'jeigz']
# Commands related to MacroModel (Schrodinger).
# Seems odd that the Jaguar geometry datatypes are in here, but we
# do a MacroModel calculation to get the data in an easy form to
# extract.
COM_MACROMODEL = ['ja', 'jb', 'jt',
'mq', 'mqh', 'mqa',
'ma', 'mb', 'mt',
'me', 'meo', 'mea', 'meao',
'mh', 'mjeig', 'mgeig',
'mp', 'mgESP', 'mjESP']
# Commands related to Tinker.
COM_TINKER = ['ta','tao', 'tb', 'tbo',
'tt','tto', 'te', 'teo',
'tea','teao', 'th',
'tjeigz', 'tgeig']
# Commands related to Amber.
COM_AMBER = ['ae','ae1','aeo','ae1o','abo','aao','ato','ah']
# All other commands.
COM_OTHER = ['r']
# All possible commands.
COM_ALL = COM_GAUSSIAN + COM_JAGUAR + COM_MACROMODEL + COM_TINKER + \
COM_AMBER + COM_OTHER
def main(args):
"""
Arguments
---------
args : string or list of strings
Evaluated using parser returned by return_calculate_parser(). If
it's a string, it will be converted into a list of strings.
"""
# Should be a list of strings for use by argparse. Ensure that's the case.
# basestring was removed in Python 3; str is probably safe in both, but
# should be tested. For now the sys.version_info switch handles it.
if sys.version_info > (3, 0):
if isinstance(args, str):
args = args.split()
else:
if isinstance(args, basestring):
args = args.split()
parser = return_calculate_parser()
opts = parser.parse_args(args)
# This makes a dictionary that only contains the arguments related to
# extracting data from everything in the argparse dictionary, opts.
# Given that the user supplies:
# python calculate.py -me a1.01.mae a2.01.mae a3.01.mae -me b1.01.mae
# b2.01.mae -mb a1.01.mae b1.01.mae -jeig a1.01.in,a1.out
# b1.01.in,b1.out
# commands looks like:
# {'me': [['a1.01.mae', 'a2.01.mae', 'a3.01.mae'],
# ['b1.01.mae', 'b2.01.mae']],
# 'mb': [['a1.01.mae'], ['b1.01.mae']],
# 'jeig': [['a1.01.in,a1.out', 'b1.01.in,b1.out']]
# }
commands = {key: value for key, value in opts.__dict__.items() if key
in COM_ALL and value}
# Add in the empty commands. I'd rather not do this, but it makes later
# coding when collecting data easier.
for command in COM_ALL:
if command not in commands:
commands.update({command: []})
pretty_all_commands(commands)
# This groups all of the data type commands associated with one file.
# commands_for_filenames looks like:
# {'a1.01.mae': ['me', 'mb'],
# 'a1.01.in': ['jeig'],
# 'a1.out': ['jeig'],
# 'a2.01.mae': ['me'],
# 'a3.01.mae': ['me'],
# 'b1.01.mae': ['me', 'mb'],
# 'b1.01.in': ['jeig'],
# 'b1.out': ['jeig'],
# 'b2.01.mae': ['me']
# }
commands_for_filenames = sort_commands_by_filename(commands)
pretty_commands_for_files(commands_for_filenames)
# This dictionary associates the filename that the user supplied with
# the command file that has to be used to execute some backend software
# calculate in order to retrieve the data that the user requested.
# inps looks like:
# {'a1.01.mae': <__main__.Mae object at 0x1110e10>,
# 'a1.01.in': None,
# 'a1.out': None,
# 'a2.01.mae': <__main__.Mae object at 0x1733b23>,
# 'a3.01.mae': <__main__.Mae object at 0x1853e12>,
# 'b1.01.mae': <__main__.Mae object at 0x2540e10>,
# 'b1.01.in': None,
# 'b1.out': None,
# 'b2.01.mae': <__main__.Mae object at 0x1353e11>,
# }
inps = {}
# This generates any of the necessary command files. It uses
# commands_for_filenames, which contains all of the data types associated
# with the given file.
# Stuff below doesn't need both comma separated filenames simultaneously.
for filename, commands_for_filename in commands_for_filenames.items():
logger.log(1, '>>> filename: {}'.format(filename))
logger.log(1, '>>> commands_for_filename: {}'.format(
commands_for_filename))
# These next two if statements will break down what command files
# have to be written by the backend software package.
if any(x in COM_MACROMODEL for x in commands_for_filename):
if os.path.splitext(filename)[1] == '.mae':
inps[filename] = filetypes.Mae(
os.path.join(opts.directory, filename))
inps[filename].commands = commands_for_filename
inps[filename].write_com(sometext=opts.append)
#Has to be here even though this is a Gaussian Job.
if os.path.splitext(filename)[1] == '.chk':
# The generated com file will be used as the input filename. It
# also seems best to do the Gaussian calculation in the
# collect_data function since we need to collect the force
# field's partial charges.
com_filename = os.path.splitext(filename)[0] + '.ESP.q2mm.com'
inps[com_filename] = filetypes.GaussCom(
os.path.join(opts.directory, com_filename))
inps[com_filename].commands = commands_for_filename
inps[com_filename].read_newzmat(filename)
elif any(x in COM_TINKER for x in commands_for_filename):
if os.path.splitext(filename)[1] == '.xyz':
inps[filename] = filetypes.TinkerXYZ(
os.path.join(opts.directory, filename))
inps[filename].commands = commands_for_filename
# Gaussian to Tinker
elif any(x in ['gta','gtb','gtt'] for x in commands_for_filename):
# For bond, angle, torsion taken from Gaussian
# The xyz will be collected from Gaussian and be rewritten in corresponding software
#
# Q2MM takes commands_for_filename for each line of RDAT and CDAT
# must make difference type
# Tinker
if os.path.splitext(filename)[1] == ".log":
inps[filename] = filetypes.TinkerXYZ_FOR_GAUS(
os.path.join(opts.directory, filename))
inps[filename].commands = commands_for_filename
# Gaussian to Amber
elif any(x in ['gaa','gab','gat','gaao','gabo','gato'] for x in commands_for_filename):
if os.path.splitext(filename)[1] == ".log":
inps[filename] = filetypes.AmberLeap_Gaus(
os.path.join(opts.directory, filename))
inps[filename].commands = commands_for_filename
elif any(x in COM_AMBER for x in commands_for_filename):
if os.path.splitext(filename)[1] == ".in": # leap.in as for now
inps[filename] = filetypes.AmberLeap(os.path.join(opts.directory, filename))
inps[filename].commands = commands_for_filename
# This doesn't work.
# We need to know both filenames simultaneously for this Amber crap.
# Have to add these to `inps` in some other way.
# pass
# In this case, no command files have to be written.
else:
inps[filename] = None
# Stuff below needs both comma separated filenames simultaneously.
# Do the Amber inputs.
# Leaving the filenames together because Taylor said this would work well.
# for comma_sep_filenames in flatten(commands['ae']):
# # Maybe make more specific later.
# inps[comma_sep_filenames] = filetypes.AmberInput(
# 'DOES_PATH_EVEN_MATTER')
# split_it = comma_sep_filenames.split(',')
# inps[comma_sep_filenames].directory = opts.directory
# inps[comma_sep_filenames].inpcrd = split_it[0]
# inps[comma_sep_filenames].prmtop = split_it[1]
logger.log(1, '>>> commands: {}'.format(commands))
# Check whether or not to skip calculations.
if opts.norun or opts.fake:
logger.log(15, " -- Skipping backend calculations.")
else:
for filename, some_class in inps.items():
logger.log(1, '>>> filename: {}'.format(filename))
logger.log(1, '>>> some_class: {}'.format(some_class))
# Works if some class is None too.
if hasattr(some_class, 'run'):
# Ideally this can be the same for each software backend,
# but that means we're going to have to make some changes
# so that this token argument is handled properly.
some_class.run(check_tokens=opts.check)
# `data` is a list of datatypes.Datum objects.
# With sorting removed, the Datum class is less useful. We may want to
# reduce this to an N x 3 matrix or 3 vectors (labels, weights, values).
sub_names = ['OPT']
if opts.subnames:
sub_names = opts.subnames
if opts.fake:
data = collect_data_fake(
commands, inps, direc=opts.directory, invert=opts.invert,
sub_names=sub_names)
else:
data = collect_data(
commands, inps, direc=opts.directory, invert=opts.invert,
sub_names=sub_names)
# Adds weights to the data points in the data list.
if opts.weight:
compare.import_weights(data)
# Optional printing or logging of data.
if opts.doprint:
pretty_data(data, log_level=None)
return data
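# Illustrative sketch, not part of the original module (hypothetical
# filenames): main() also accepts a single string, which it splits into
# argparse-style arguments before collecting data.
# >>> data = main('-d somedir -r reference.txt --weight')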
def return_calculate_parser(add_help=True, parents=None):
'''
Command line argument parser for calculate.
Arguments
---------
add_help : bool
Whether or not to add help to the parser. Default
is True.
parents : argparse.ArgumentParser
Parent parser incorporated into this parser. Default
is None.
'''
# Whether or not to add parents parsers. Not sure if/where this may be used
# anymore.
if parents is None: parents = []
# Whether or not to add help. You may not want to add help if these
# arguments are being used in another, higher level parser.
if add_help:
parser = argparse.ArgumentParser(
description=__doc__, parents=parents)
else:
parser = argparse.ArgumentParser(
add_help=False, parents=parents)
# GENERAL OPTIONS
opts = parser.add_argument_group("calculate options")
opts.add_argument(
'--append', '-a', type=str, metavar='sometext',
help='Append this text to command files generated by Q2MM.')
opts.add_argument(
'--directory', '-d', type=str, metavar='somepath', default=os.getcwd(),
help=('Directory searched for files '
'(ex. *.mae, *.log, mm3.fld, etc.). '
'Subshell commands (ex. MacroModel) are executed from here. '
'Default is the current directory.'))
opts.add_argument(
'--doprint', '-p', action='store_true',
help=("Logs data. Can generate extensive log files."))
opts.add_argument(
'--fake', action='store_true',
help=("Generate fake data sets. Used to expedite testing."))
opts.add_argument(
'--ffpath', '-f', type=str, metavar='somepath',
help=("Path to force field. Only necessary for certain data types "
"if you don't provide the substructure name."))
opts.add_argument(
'--invert', '-i', type=float, metavar='somefloat',
help=("This option will invert the smallest eigenvalue to be whatever "
"value is specified by this argument whenever a Hessian is "
"read."))
opts.add_argument(
'--nocheck', '-nc', action='store_false', dest='check', default=True,
help=("By default, Q2MM checks whether MacroModel tokens are "
"available before attempting a MacroModel calculation. If this "
"option is supplied, MacroModel will not check for tokens "
"first."))
opts.add_argument(
'--norun', '-n', action='store_true',
help="Don't run 3rd party software.")
opts.add_argument(
'--subnames', '-s', type=str, nargs='+',
metavar='"Substructure Name OPT"',
help=("Names of the substructures containing parameters to "
"optimize in a mm3.fld file."))
opts.add_argument(
'--weight', '-w', action='store_true',
help='Add weights to data points.')
# GAUSSIAN OPTIONS
gau_args = parser.add_argument_group("gaussian reference data types")
gau_args.add_argument(
'-gta', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help=('Gaussian angles using Tinker.'))
gau_args.add_argument(
'-gtb', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help=('Gaussian bonds using Tinker.'))
gau_args.add_argument(
'-gtt', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help=('Gaussian torsions using Tinker.'))
gau_args.add_argument(
'-gaa', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help=('Gaussian angles using Amber.'))
gau_args.add_argument(
'-gab', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help=('Gaussian bonds using Amber.'))
gau_args.add_argument(
'-gat', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help=('Gaussian torsions using Amber.'))
gau_args.add_argument(
'-gaao', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help=('Gaussian angles using Amber (POST OPT).'))
gau_args.add_argument(
'-gabo', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help=('Gaussian bonds using Amber (POST OPT).'))
gau_args.add_argument(
'-gato', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help=('Gaussian torsions using Amber (POST OPT).'))
gau_args.add_argument(
'-ge', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help=('Gaussian energies.'))
gau_args.add_argument(
'-ge1', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help=('Gaussian energy.'))
gau_args.add_argument(
'-gea', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help=('Gaussian energies. Energies will be relative to the average '
'energy within this data type.'))
gau_args.add_argument(
'-geo', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help=('Gaussian energies. Same as -ge, except the files selected '
'by this command will have their energies compared to those '
'selected by -meo.'))
gau_args.add_argument(
'-ge1o', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help=('Gaussian energy. Used for FF a1o commands.'))
gau_args.add_argument(
'-geao', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help=('Gaussian energies. Same as -ge, except the files selected '
'by this command will have their energies compared to those '
'selected by -meo. Energies will be relative to the average '
'energy within this data type.'))
gau_args.add_argument(
'-gh', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help='Gaussian Hessian extracted from a .log archive.')
gau_args.add_argument(
'-geigz', type=str, nargs='+', action='append',
default=[], metavar='somename.log',
help=('Gaussian eigenmatrix. Includes all elements, but zeroes '
'all off-diagonal elements. Uses only the .log for '
'the eigenvalues and eigenvectors.'))
# JAGUAR OPTIONS
jag_args = parser.add_argument_group("jaguar reference data types")
jag_args.add_argument(
'-jq', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help='Jaguar partial charges.')
jag_args.add_argument(
'-jqh', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help=('Jaguar partial charges (excludes aliphatic hydrogens). '
'Sums aliphatic hydrogen charges into their bonded sp3 '
'carbon.'))
jag_args.add_argument(
'-jqa', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help=('Jaguar partial charges. Sums the partial charge of all singly '
'bonded hydrogens into its connected atom.'))
jag_args.add_argument(
'-je', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help='Jaguar energies.')
jag_args.add_argument(
'-jea', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help=('Jaguar energies. Everything will be relative to the average '
'energy.'))
jag_args.add_argument(
'-jeo', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help=('Jaguar energies. Same as -je, except the files selected '
'by this command will have their energies compared to those '
'selected by -meo.'))
jag_args.add_argument(
'-jeao', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help=('Jaguar energies. Same as -jea, except the files selected '
'by this command will have their energies compared to those '
'selected by -meao.'))
jag_args.add_argument(
'-ja', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help='Jaguar angles.')
jag_args.add_argument(
'-jb', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help='Jaguar bond lengths.')
jag_args.add_argument(
'-jt', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help='Jaguar torsions.')
jag_args.add_argument(
'-jh', type=str, nargs='+', action='append',
default=[], metavar='somename.in',
help='Jaguar Hessian.')
jag_args.add_argument(
'-jeigz', type=str, nargs='+', action='append',
default=[], metavar='somename.in,somename.out',
help=('Jaguar eigenmatrix. Includes all elements, but zeroes '
'all off-diagonal elements.'))
# ADDITIONAL REFERENCE OPTIONS
ref_args = parser.add_argument_group("other reference data types")
ref_args.add_argument(
'-r', type=str, nargs='+', action='append',
default=[], metavar='somename.txt',
help=('Read reference data from file. The reference file should '
'have 3 space- or tab-separated columns. Column 1 is the labels, '
'column 2 is the weights, and column 3 is the values.'))
# MACROMODEL OPTIONS
mm_args = parser.add_argument_group("macromodel data types")
mm_args.add_argument(
'-mq', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help='MacroModel charges.')
mm_args.add_argument(
'-mqh', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help='MacroModel charges (excludes aliphatic hydrogens).')
mm_args.add_argument(
'-mqa', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help=('MacroModel partial charges. Sums the partial charge of all '
'singly bonded hydrogens into its connected atom.'))
mm_args.add_argument(
'-me', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help='MacroModel energies (pre-FF optimization).')
mm_args.add_argument(
'-mea', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help='MacroModel energies (pre-FF optimization). Energies will be '
'relative to the average energy.')
mm_args.add_argument(
'-meo', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help='MacroModel energies (post-FF optimization).')
mm_args.add_argument(
'-meao', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help='MacroModel energies (post-FF optimization). Energies will be '
'relative to the average energy.')
mm_args.add_argument(
'-mb', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help='MacroModel bond lengths (post-FF optimization).')
mm_args.add_argument(
'-ma', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help='MacroModel angles (post-FF optimization).')
mm_args.add_argument(
'-mt', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help='MacroModel torsions (post-FF optimization).')
mm_args.add_argument(
'-mh', type=str, nargs='+', action='append',
default=[], metavar='somename.mae',
help='MacroModel Hessian.')
mm_args.add_argument(
'-mjeig', type=str, nargs='+', action='append',
default=[], metavar='somename.mae,somename.out',
help='MacroModel eigenmatrix (all elements). Uses Jaguar '
'eigenvectors.')
mm_args.add_argument(
'-mgeig', type=str, nargs='+', action='append',
default=[], metavar='somename.mae,somename.log',
help='MacroModel eigenmatrix (all elements). Uses Gaussian '
'eigenvectors.')
mm_args.add_argument(
'-mp', type=str, nargs='+', action='append',
default=[], metavar='somename.fld,somename.txt',
help='Uses a MM3* FF file (somename.fld) and a parameter file '
'(somename.txt) to use the current FF parameter values as data. This '
'is used for harmonic parameter tethering.')
mm_args.add_argument(
'-mgESP', type=str, nargs='+', action='append',
default=[], metavar='somename.mae,somename.chk',
help='Uses the partial charges obtained from the FF and *mae file to '
'determine the RMS of electrostatic fitting from a Gaussian *chk file.')
mm_args.add_argument(
'-mjESP', type=str, nargs='+', action='append',
default=[], metavar='somename.mae,somename.in',
help='Uses the partial charges obtained from the FF and *mae file to '
'determine the RMS of electrostatic fitting from a Schrodinger *in '
'file.')
# TINKER OPTIONS
tin_args = parser.add_argument_group("tinker data types")
tin_args.add_argument(
'-te', type=str, nargs='+', action='append',
default=[], metavar='somename.xyz',
help='Tinker energies (pre-FF optimization).')
tin_args.add_argument(
'-tea', type=str, nargs='+', action='append',
default=[], metavar='somename.xyz',
help='Tinker energies (pre-FF optimization). Energies will be '
'relative to the average energy.')
tin_args.add_argument(
'-teo', type=str, nargs='+', action='append',
default=[], metavar='somename.xyz',
help='Tinker energies (post-FF optimization).')
tin_args.add_argument(
'-teao', type=str, nargs='+', action='append',
default=[], metavar='somename.xyz',
help='Tinker energies (post-FF optimization). Energies will be '
'relative to the average energy.')
tin_args.add_argument(
'-tb', type=str, nargs='+', action='append',
default=[], metavar='somename.xyz',
help='Tinker bond lengths (pre-FF optimization).')
tin_args.add_argument(
'-tbo', type=str, nargs='+', action='append',
default=[], metavar='somename.xyz',
help='Tinker bond lengths (post-FF optimization).')
tin_args.add_argument(
'-ta', type=str, nargs='+', action='append',
default=[], metavar='somename.xyz',
help='Tinker angles (pre-FF optimization).')
tin_args.add_argument(
'-tao', type=str, nargs='+', action='append',
default=[], metavar='somename.xyz',
help='Tinker angles (post-FF optimization).')
tin_args.add_argument(
'-tt', type=str, nargs='+', action='append',
default=[], metavar='somename.xyz',
help='Tinker torsions (pre-FF optimization).')
tin_args.add_argument(
'-tto', type=str, nargs='+', action='append',
default=[], metavar='somename.xyz',
help='Tinker torsions (post-FF optimization).')
tin_args.add_argument(
'-th', type=str, nargs='+', action='append',
default=[], metavar='somename.xyz',
help='Tinker Hessian.')
tin_args.add_argument(
'-tjeig', type=str, nargs='+', action='append',
default=[], metavar='somename.xyz,somename.out',
help='Tinker eigenmatrix (all elements). Uses Jaguar '
'eigenvectors.')
tin_args.add_argument(
'-tgeig', type=str, nargs='+', action='append',
default=[], metavar='somename.xyz,somename.log',
help='Tinker eigenmatrix (all elements). Uses Gaussian '
'eigenvectors.')
# AMBER OPTIONS
amb_args = parser.add_argument_group("amber data types")
amb_args.add_argument(
'-ae', type=str, nargs='+', action='append',
default=[], metavar='somename.inpcrd,somename.prmtop',
help='Amber energies.')
amb_args.add_argument(
'-abo', type=str, nargs='+', action='append',
default=[], metavar='somename.in',
help=('Amber bonds (post-FF optimization).'))
amb_args.add_argument(
'-aao', type=str, nargs='+', action='append',
default=[], metavar='somename.in',
help=('Amber angles (post-FF optimization).'))
amb_args.add_argument(
'-ato', type=str, nargs='+', action='append',
default=[], metavar='somename.in',
        help=('Amber torsions (post-FF optimization).'))
amb_args.add_argument(
'-ae1', type=str, nargs='+', action='append',
default=[], metavar='somename.in',
help='Amber energy (pre-FF optimization).')
amb_args.add_argument(
'-ae1o', type=str, nargs='+', action='append',
default=[], metavar='somename.in',
help='Amber energy (post-FF optimization).')
amb_args.add_argument(
'-ah', type=str, nargs='+', action='append',
default=[], metavar='somename.in',
help='Amber Hessian (post-FF optimization).')
amb_args.add_argument(
'-aha', type=str, nargs='+', action='append',
default=[], metavar='somename.in',
help='Amber Hessian (post-FF optimization).')
return parser
def check_outs(filename, outs, classtype, direc):
"""
Reads a file if necessary. Checks the output dictionary first in
case the file has already been loaded.
    Could ease the use of this function by reducing the number of
    arguments required.
"""
logger.log(1, '>>> filename: {}'.format(filename))
logger.log(1, '>>> outs: {}'.format(outs))
logger.log(1, '>>> classtype: {}'.format(classtype))
logger.log(1, '>>> direc: {}'.format(direc))
if filename not in outs:
outs[filename] = \
classtype(os.path.join(direc, filename))
return outs[filename]
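# A minimal sketch (not called anywhere) of the caching behavior of
# check_outs. The _StubParsed class and the filename below are placeholders,
# not real classes from filetypes or real files on disk.
def _example_check_outs_caching():
    class _StubParsed(object):
        def __init__(self, path):
            self.path = path
    outs = {}
    first = check_outs('a1.log', outs, _StubParsed, 'somedir')
    second = check_outs('a1.log', outs, _StubParsed, 'somedir')
    # The second call returns the object cached by the first call.
    assert first is second
    return outs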
def collect_reference(path):
"""
Reads the data inside a reference data text file.
This must have 3 columns:
1. Labels
2. Weights
3. Values
"""
data = []
with open(path, 'r') as f:
for i, line in enumerate(f):
            # Skip blank lines and lines that start with a dash or hash.
            if not line.strip() or line[0] in ['-', '#']:
                continue
# Remove everything following a # in a line.
line = line.partition('#')[0]
cols = line.split()
# There should always be 3 columns.
assert len(cols) == 3, \
'Error reading line {} from {}: {}'.format(
i, path, line)
lbl, wht, val = cols
datum = datatypes.Datum(lbl=lbl, wht=float(wht), val=float(val))
# Added this from the function below, read_reference()
lbl_to_data_attrs(datum, lbl)
data.append(datum)
return np.array(data)
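# For reference, a minimal reference data text file (hypothetical labels,
# weights and values) could look like the block below. The columns are label,
# weight and value; lines starting with '#' or '-' are skipped, and anything
# after a '#' on a data line is ignored.
#
#   # label                  weight  value
#   e_somename.01.mae_1-1    20.0    0.0
#   e_somename.01.mae_1-2    20.0    12.5   # relative energy in kJ/mol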
# Must be rewritten to go in a particular order of data types every time.
def collect_data(coms, inps, direc='.', sub_names=['OPT'], invert=None):
"""
Arguments
---------
invert : None or float
If given, will modify the smallest value of the Hessian to
this value.
"""
# outs looks like:
# {'filename1': <some class for filename1>,
# 'filename2': <some class for filename2>,
# 'filename3': <some class for filename3>
# }
outs = {}
# List of Datum objects.
data = []
# REFERENCE DATA TEXT FILES
# No grouping is necessary for this data type, so flatten the list of
# lists.
filenames = chain.from_iterable(coms['r'])
for filename in filenames:
        # Unlike most datatypes, these Datum objects only get the attributes
        # _lbl, val and wht. This is to ensure that making and working with
        # these reference text files isn't too cumbersome.
data.extend(collect_reference(os.path.join(direc, filename)))
# MACROMODEL MM3* CURRENT PARAMETER VALUES
filenames = chain.from_iterable(coms['mp'])
for comma_filenames in filenames:
# FF file and parameter file.
name_fld, name_txt = comma_filenames.split(',')
ff = datatypes.MM3(os.path.join(direc, name_fld))
ff.import_ff()
ff.params = parameters.trim_params_by_file(
ff.params, os.path.join(direc, name_txt))
for param in ff.params:
data.extend([datatypes.Datum(
val=param.value,
com='mp',
typ='p',
src_1=name_fld,
src_2=name_txt,
idx_1=param.mm3_row,
idx_2=param.mm3_col)])
# JAGUAR ENERGIES
filenames_s = coms['je']
# idx_1 is the number used to group sets of relative energies.
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
mae = check_outs(filename, outs, filetypes.Mae, direc)
            # idx_2 corresponds to the structure inside the file in case the
            # .mae file contains multiple structures.
for idx_2, structure in enumerate(mae.structures):
try:
energy = structure.props['r_j_Gas_Phase_Energy']
except KeyError:
energy = structure.props['r_j_QM_Energy']
energy *= co.HARTREE_TO_KJMOL
temp.append(datatypes.Datum(
val=energy,
com='je',
typ='e',
src_1=filename,
idx_1=idx_1 + 1,
idx_2=idx_2 + 1))
# For this data type, we set everything relative.
zero = min([x.val for x in temp])
for datum in temp:
datum.val -= zero
data.extend(temp)
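    # As a quick illustration of the relative-energy shift above (hypothetical
    # numbers): if one group of structures gives energies of 10.0, 12.5 and
    # 11.0 kJ/mol, the group minimum (10.0) is subtracted from each, leaving
    # 0.0, 2.5 and 1.0. The 'jea', 'gea' and 'mea' commands further down do
    # the same thing but subtract the group average instead of the minimum.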
# KJK
# FOR A SINGLE MODEL SYSTEM FITTING
# GAUSSIAN ENERGY
filename_s = coms['ge1']
for idx_1, filenames in enumerate(filename_s):
temp = []
for filename in filenames:
log = check_outs(filename, outs, filetypes.GaussLog, direc)
things_to_add = []
for thing_label in co.GAUSSIAN_ENERGIES:
thing = log.structures[0].props[thing_label]
if ',' in thing:
thing = [float(x) for x in thing.split(',')]
else:
thing = [float(thing)]
things_to_add.append(thing)
energies = [0.] * len(things_to_add[0])
for thing_group in things_to_add:
for i, thing in enumerate(thing_group):
energies[i] += thing
energies = [x * co.HARTREE_TO_KJMOL for x in energies]
for i, e in enumerate(energies):
temp.append(datatypes.Datum(
val=e,
com='ge1',
typ='e1',
src_1=filename,
idx_1=idx_1 + 1,
idx_2=i + 1))
data.extend(temp)
# FOR A SINGLE MODEL SYSTEM FITTING (Comparing to Optimized structure)
# GAUSSIAN ENERGY
filename_s = coms['ge1o']
for idx_1, filenames in enumerate(filename_s):
temp = []
for filename in filenames:
log = check_outs(filename, outs, filetypes.GaussLog, direc)
things_to_add = []
for thing_label in co.GAUSSIAN_ENERGIES:
thing = log.structures[0].props[thing_label]
if ',' in thing:
thing = [float(x) for x in thing.split(',')]
else:
thing = [float(thing)]
things_to_add.append(thing)
energies = [0.] * len(things_to_add[0])
for thing_group in things_to_add:
for i, thing in enumerate(thing_group):
energies[i] += thing
energies = [x * co.HARTREE_TO_KJMOL for x in energies]
for i, e in enumerate(energies):
temp.append(datatypes.Datum(
val=e,
com='ge1o',
typ='e1o',
src_1=filename,
idx_1=idx_1 + 1,
idx_2=i + 1))
data.extend(temp)
# GAUSSIAN ENERGIES
filename_s = coms['ge']
for idx_1, filenames in enumerate(filename_s):
temp = []
for filename in filenames:
log = check_outs(filename, outs, filetypes.GaussLog, direc)
# This will be a list of lists. For example, let's say that
# co.GAUSSIAN_ENERGIES is ['HF', 'ZeroPoint'], then
# the 1st list in things_to_add would be the HF energies
# and the 2nd list would be the ZP energies.
#
# Consider if you had ['HF', 'ZeroPoint'] as co.GAUSSIAN_ENERGIES
# and your archive had this:
# HF=0.634,0.2352\ZeroPoint=0.01234,0.0164
# The resulting things_to_add would be:
# things_to_add = [[0.634, 0.2352],
# [0.01234, 0.0164]]
things_to_add = []
# Remember, thing_label is whatever you specified in
# co.GAUSSIAN_ENERGIES.
for thing_label in co.GAUSSIAN_ENERGIES:
# Consider if your Gaussian log archive has the following:
# HF=0.234,0.1234,0.5732
# Then, if co.GAUSSIAN_ENERGIES includes 'HF', then that
# particular thing, or sublist that goes into things_to_add,
# would look like:
# thing = ['0.234', '0.1234', '0.5732']
# Here's another example. Consider if your archive has the
# property "stupidproperty":
# stupidproperty=can,i,be,more,clear
# Then this particular sublist, named thing, would be
# thing = ['can', 'i', 'be', 'more', 'clear']
# Lastly, consider if you have this:
# ZeroPoint=0.12341
# Then thing would be this:
# thing = ['0.12341']
thing = log.structures[0].props[thing_label]
# Deal with multiple structures by checking for this
# split here.
if ',' in thing:
# Note that the "stupidproperty" example would fail here
# because its elements can not be converted to floats.
thing = [float(x) for x in thing.split(',')]
# Here, thing might look like:
# thing = [0.1235235, 0.2352, 0.352345]
else:
# Here it would be a list with only one element.
thing = [float(thing)]
things_to_add.append(thing)
# Initialize list of zeros. Python syntax looks funny sometimes.
# The length of the things_to_add sublists should always be the
# same if you're doing it right. I suppose you could add some
# sort of assert here.
energies = [0.] * len(things_to_add[0])
# In this case, consider the earlier example where:
# things_to_add = [[0.634, 0.2352],
# [0.01234, 0.0164]]
# Here, the first thing_group would be [0.634, 0.2352] and the
# second thing_group would be [0.01234, 0.0164].
for thing_group in things_to_add:
# After the loop through the 1st thing_group, we would have
# energies = [0.634, 0.2352]. After the 2nd thing_group, we
# would have energies = [0.634 + 0.01234, 0.2352 + 0.0164].
for i, thing in enumerate(thing_group):
energies[i] += thing
energies = [x * co.HARTREE_TO_KJMOL for x in energies]
for i, e in enumerate(energies):
temp.append(datatypes.Datum(
val=e,
com='ge',
typ='e',
src_1=filename,
idx_1=idx_1 + 1,
idx_2=i + 1))
# This works when HF and ZeroPoint are used. Had to make it more
# general.
# Revisit how structures are stored in GaussLog when you have time.
# hf = log.structures[0].props['HF']
# zp = log.structures[0].props['ZeroPoint']
# if ',' in hf:
# hfs = map(float, hf.split(','))
# zps = map(float, zp.split(','))
# else:
# hfs = [float(hf)]
# zps = [float(zp)]
# es = []
# for hf, zp in izip(hfs, zps):
# es = (hf + zp) * co.HARTREE_TO_KJMOL
# for i, e in enumerate(es):
# temp.append(datatypes.Datum(
# val=e,
# com='ge',
# typ='e',
# src_1=filename,
# idx_1=idx_1 + 1,
# idx_2=i + 1))
# Here's the old code from before we supported multiple energies.
# I think it's helpful history for new coders trying to understand
# how to write in new datatypes. Notice how the new code utilizes
# idx_2.
# hf = float(log.structures[0].props['HF'])
# zp = float(log.structures[0].props['ZeroPoint'])
# energy = (hf + zp) * co.HARTREE_TO_KJMOL
# # We don't use idx_2 since we assume there is only one structure
# # in a Gaussian .log. I think that's always the case.
# temp.append(datatypes.Datum(
# val=energy,
# com='ge',
# typ='e',
# src_1=filename,
# idx_1=idx_1 + 1))
zero = min([x.val for x in temp])
for datum in temp:
datum.val -= zero
data.extend(temp)
# MACROMODEL ENERGIES
filenames_s = coms['me']
ind = 'pre'
for idx_1, filenames in enumerate(filenames_s):
for filename in filenames:
name_mae = inps[filename].name_mae
mae = check_outs(name_mae, outs, filetypes.Mae, direc)
indices = inps[filename]._index_output_mae
            # This is a list of pairs. The 1st value in each pair is the
            # number of the structure. The 2nd value is the structure class.
selected_structures = filetypes.select_structures(
mae.structures, indices, ind)
for idx_2, structure in selected_structures:
data.append(datatypes.Datum(
val=structure.props['r_mmod_Potential_Energy-MM3*'],
com='me',
typ='e',
src_1=inps[filename].name_mae,
idx_1=idx_1 + 1,
idx_2=idx_2 + 1))
# KJK
# GAUSSIAN TO AMBER BONDS (PRE OPT)
filenames_s = coms['gab']
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
temp.extend(collect_structural_data_from_amber_geo(
filename, inps, outs, direc, 'gab', 'pre', 'bonds', idx_1 = idx_1))
data.extend(temp)
# GAUSSIAN TO AMBER ANGLES (PRE OPT)
filenames_s = coms['gaa']
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
temp.extend(collect_structural_data_from_amber_geo(
filename, inps, outs, direc, 'gaao', 'pre', 'angles', idx_1 = idx_1))
data.extend(temp)
# GAUSSIAN TO AMBER TORSIONS (PRE OPT)
filenames_s = coms['gat']
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
temp.extend(collect_structural_data_from_amber_geo(
filename, inps, outs, direc, 'gato', 'pre', 'torsions', idx_1 = idx_1))
data.extend(temp)
# GAUSSIAN TO AMBER BONDS (POST OPT)
filenames_s = coms['gabo']
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
temp.extend(collect_structural_data_from_amber_geo(
filename, inps, outs, direc, 'gabo', 'opt', 'bonds', idx_1 = idx_1))
data.extend(temp)
# GAUSSIAN TO AMBER ANGLES (POST OPT)
filenames_s = coms['gaao']
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
temp.extend(collect_structural_data_from_amber_geo(
filename, inps, outs, direc, 'gaao', 'opt', 'angles', idx_1 = idx_1))
data.extend(temp)
# GAUSSIAN TO AMBER TORSIONS (POST OPT)
filenames_s = coms['gato']
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
temp.extend(collect_structural_data_from_amber_geo(
filename, inps, outs, direc, 'gato', 'opt', 'torsions', idx_1 = idx_1))
data.extend(temp)
# AMBER BONDS
filenames_s = coms['abo']
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
temp.extend(collect_structural_data_from_amber_geo(
filename, inps, outs, direc, 'abo', 'opt', 'bonds', idx_1 = idx_1))
data.extend(temp)
# AMBER ANGLES
filenames_s = coms['aao']
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
temp.extend(collect_structural_data_from_amber_geo(
filename, inps, outs, direc, 'aao', 'opt', 'angles', idx_1 = idx_1))
data.extend(temp)
# AMBER TORSIONS
filenames_s = coms['ato']
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
temp.extend(collect_structural_data_from_amber_geo(
filename, inps, outs, direc, 'ato', 'opt', 'torsions', idx_1 = idx_1))
data.extend(temp)
# AMBER ENERGY
filenames_s = coms['ae1']
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
temp.append(collect_structural_data_from_amber_ene(
filename, inps, outs, direc, 'ae1', 'pre', 'e1', idx_1 = idx_1))
data.extend(temp)
# AMBER OPTIMIZED ENERGY
filenames_s = coms['ae1o']
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
temp.append(collect_structural_data_from_amber_ene(
filename, inps, outs, direc, 'ae1o', 'opt', 'e1o', idx_1 = idx_1))
data.extend(temp)
# AMBER HESSIAN
filenames = chain.from_iterable(coms['ah'])
for filename in filenames:
name_hes = inps[filename].name_hes
hes = check_outs(name_hes, outs, filetypes.AmberHess, direc)
hess = hes.hessian
# hessian extracted from Amber is already mass weighted
low_tri_idx = np.tril_indices_from(hess)
low_tri = hess[low_tri_idx]
int2 = []
int3 = []
int4 = []
if os.path.isfile("calc/geo.npy"):
hes_geo = None
if np.__version__ >= '1.16.4':
hes_geo = np.load("calc/geo.npy",allow_pickle=True)
else:
hes_geo = np.load("calc/geo.npy")
for ele in hes_geo:
inter = np.count_nonzero(ele)
a,b,c,d = ele
if inter == 2:
a = int(a)
b = int(b)
int2.append([a,b])
elif inter == 3:
a = int(a)
c = int(c)
int3.append([a,c])
elif inter == 4:
a = int(a)
d = int(d)
int4.append([a,d])
frozen = 0
f_atom = []
if os.path.isfile("fixedatoms.txt"):
frozen = 1
ref = open("fixedatoms.txt","r")
flines = ref.readlines()
for fline in flines:
line = fline.split()
if len(line) == 1:
f_atom.append(int(line[0]))
print("Reading fixedatoms.txt\nFixed Atom Numbers:",f_atom)
        def int_wht(at_1, at_2):
            """
            Returns the weight used for a Hessian matrix element based on the
            interaction between the two atoms:
                diagonal elements                   0.0
                1-2 interactions                    co.WEIGHTS['h12']
                1-3 and 1-4 interactions            co.WEIGHTS['h14']
                elements involving a frozen atom    0.0
                everything else                     1.0
            """
apair = [at_1,at_2]
if at_1 == at_2:
return 0.0
elif apair in int2:
return co.WEIGHTS['h12']
elif apair in int3:
return co.WEIGHTS['h14']
elif apair in int4:
return co.WEIGHTS['h14']
elif frozen:
if at_1 in f_atom or at_2 in f_atom:
#print("DEBUG:",at_1,at_2,f_atom)
return 0.0
else:
return 1.0
else:
return 1.0
data.extend([datatypes.Datum(
val=e,
com='ah',
typ='h',
src_1=hes.filename,
idx_1=x + 1,
idx_2=y + 1,
atm_1=int((x)//3+1),
atm_2=int((y)//3+1),
wht = int_wht(int((x)//3+1),int((y)//3+1)))
for e, x, y in zip(
low_tri, low_tri_idx[0], low_tri_idx[1])])
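    # For reference, a tiny hypothetical case of the lower-triangle extraction
    # used above: for a 2x2 Hessian
    #   hess = [[1.0, 2.0],
    #           [2.0, 3.0]]
    # np.tril_indices_from(hess) returns (array([0, 1, 1]), array([0, 0, 1])),
    # so low_tri is [1.0, 2.0, 3.0] and the resulting Datum indices
    # (idx_1, idx_2) are (1, 1), (2, 1) and (2, 2).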
# AMBER ENERGIES
# filenames_s = coms['ae']
# for idx_1, filenames in enumerate(filenames_s):
# logger.log(1, '>>> idx_1: {}'.format(idx_1))
# logger.log(1, '>>> filenames: {}'.format(filenames))
# for idx_2, comma_sep_filenames in enumerate(filenames):
# name_1, name_2 = comma_sep_filenames.split(',')
# out = check_outs(
# comma_sep_filenames, outs, filetypes.AmberOut, direc)
# # Right now, path is a comma separated string.
# out.path = inps[comma_sep_filenames].out
# logger.log(1, '>>> out: {}'.format(out))
# energy = out.read_energy()
# data.append(datatypes.Datum(
# val=energy,
# com='ae',
# typ='e',
# src_1=name_1,
# src_2=name_2,
# idx_1=idx_1 + 1,
# idx_2=idx_2 + 1))
# JAGUAR AVERAGE ENERGIES
filenames_s = coms['jea']
# idx_1 is the number used to group sets of relative energies.
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
mae = check_outs(filename, outs, filetypes.Mae, direc)
            # idx_2 corresponds to the structure inside the file in case the
            # .mae file contains multiple structures.
for idx_2, structure in enumerate(mae.structures):
try:
energy = structure.props['r_j_Gas_Phase_Energy']
except KeyError:
energy = structure.props['r_j_QM_Energy']
energy *= co.HARTREE_TO_KJMOL
temp.append(datatypes.Datum(
val=energy,
com='jea',
typ='ea',
src_1=filename,
idx_1=idx_1 + 1,
idx_2=idx_2 + 1))
# For this data type, we set everything relative.
avg = sum([x.val for x in temp]) / len(temp)
for datum in temp:
datum.val -= avg
data.extend(temp)
# GAUSSIAN AVERAGE ENERGIES
filename_s = coms['gea']
for idx_1, filenames in enumerate(filename_s):
temp = []
for filename in filenames:
log = check_outs(filename, outs, filetypes.GaussLog, direc)
things_to_add = []
for thing_label in co.GAUSSIAN_ENERGIES:
thing = log.structures[0].props[thing_label]
if ',' in thing:
thing = [float(x) for x in thing.split(',')]
else:
thing = [float(thing)]
things_to_add.append(thing)
energies = [0.] * len(things_to_add[0])
for thing_group in things_to_add:
for i, thing in enumerate(thing_group):
energies[i] += thing
energies = [x * co.HARTREE_TO_KJMOL for x in energies]
for i, e in enumerate(energies):
temp.append(datatypes.Datum(
val=e,
com='gea',
typ='ea',
src_1=filename,
idx_1=idx_1 + 1,
idx_2=i + 1))
avg = sum([x.val for x in temp]) / len(temp)
for datum in temp:
datum.val -= avg
data.extend(temp)
# MACROMODEL AVERAGE ENERGIES
filenames_s = coms['mea']
ind = 'pre'
# idx_1 is the number used to group sets of relative energies.
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
name_mae = inps[filename].name_mae
mae = check_outs(name_mae, outs, filetypes.Mae, direc)
indices = inps[filename]._index_output_mae
            # This is a list of pairs. The 1st value in each pair is the
            # number of the structure. The 2nd value is the structure class.
selected_structures = filetypes.select_structures(
mae.structures, indices, ind)
for idx_2, structure in selected_structures:
temp.append(datatypes.Datum(
val=structure.props['r_mmod_Potential_Energy-MM3*'],
com='mea',
typ='ea',
src_1=inps[filename].name_mae,
idx_1=idx_1 + 1,
idx_2=idx_2 + 1))
avg = sum([x.val for x in temp]) / len(temp)
for datum in temp:
datum.val -= avg
data.extend(temp)
# JAGUAR ENERGIES COMPARED TO OPTIMIZED MM
filenames_s = coms['jeo']
# idx_1 is the number used to group sets of relative energies.
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
mae = check_outs(filename, outs, filetypes.Mae, direc)
            # idx_2 corresponds to the structure inside the file in case the
            # .mae file contains multiple structures.
for idx_2, structure in enumerate(mae.structures):
try:
energy = structure.props['r_j_Gas_Phase_Energy']
except KeyError:
energy = structure.props['r_j_QM_Energy']
energy *= co.HARTREE_TO_KJMOL
temp.append(datatypes.Datum(
val=energy,
com='jeo',
typ='eo',
src_1=filename,
idx_1=idx_1 + 1,
idx_2=idx_2 + 1))
# For this data type, we set everything relative.
zero = min([x.val for x in temp])
for datum in temp:
datum.val -= zero
data.extend(temp)
# GAUSSIAN ENERGIES RELATIVE TO OPTIMIZED MM
filename_s = coms['geo']
for idx_1, filenames in enumerate(filename_s):
temp = []
for filename in filenames:
log = check_outs(filename, outs, filetypes.GaussLog, direc)
things_to_add = []
for thing_label in co.GAUSSIAN_ENERGIES:
thing = log.structures[0].props[thing_label]
if ',' in thing:
thing = [float(x) for x in thing.split(',')]
else:
thing = [float(thing)]
things_to_add.append(thing)
energies = [0.] * len(things_to_add[0])
for thing_group in things_to_add:
for i, thing in enumerate(thing_group):
energies[i] += thing
energies = [x * co.HARTREE_TO_KJMOL for x in energies]
for i, e in enumerate(energies):
temp.append(datatypes.Datum(
val=e,
com='geo',
typ='eo',
src_1=filename,
idx_1=idx_1 + 1,
idx_2=i + 1))
zero = min([x.val for x in temp])
for datum in temp:
datum.val -= zero
data.extend(temp)
# MACROMODEL OPTIMIZED ENERGIES
filenames_s = coms['meo']
ind = 'opt'
for idx_1, filenames in enumerate(filenames_s):
for filename in filenames:
name_mae = inps[filename].name_mae
mae = check_outs(name_mae, outs, filetypes.Mae, direc)
indices = inps[filename]._index_output_mae
selected_structures = filetypes.select_structures(
mae.structures, indices, ind)
for idx_2, structure in selected_structures:
data.append(datatypes.Datum(
val=structure.props['r_mmod_Potential_Energy-MM3*'],
com='meo',
typ='eo',
src_1=inps[filename].name_mae,
idx_1=idx_1 + 1,
idx_2=idx_2 + 1))
# JAGUAR ENERGIES RELATIVE TO AVERAGE COMPARED TO OPTIMIZED MM
filenames_s = coms['jeao']
# idx_1 is the number used to group sets of relative energies.
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
mae = check_outs(filename, outs, filetypes.Mae, direc)
            # idx_2 corresponds to the structure inside the file in case the
            # .mae file contains multiple structures.
for idx_2, structure in enumerate(mae.structures):
try:
energy = structure.props['r_j_Gas_Phase_Energy']
except KeyError:
energy = structure.props['r_j_QM_Energy']
energy *= co.HARTREE_TO_KJMOL
temp.append(datatypes.Datum(
val=energy,
com='jeao',
typ='eao',
src_1=filename,
idx_1=idx_1 + 1,
idx_2=idx_2 + 1))
avg = sum([x.val for x in temp]) / len(temp)
for datum in temp:
datum.val -= avg
data.extend(temp)
# GAUSSIAN AVERAGE ENERGIES RELATIVE TO OPTIMIZED MM
filename_s = coms['geao']
for idx_1, filenames in enumerate(filename_s):
temp = []
for filename in filenames:
log = check_outs(filename, outs, filetypes.GaussLog, direc)
things_to_add = []
for thing_label in co.GAUSSIAN_ENERGIES:
thing = log.structures[0].props[thing_label]
if ',' in thing:
thing = [float(x) for x in thing.split(',')]
else:
thing = [float(thing)]
things_to_add.append(thing)
energies = [0.] * len(things_to_add[0])
for thing_group in things_to_add:
for i, thing in enumerate(thing_group):
energies[i] += thing
energies = [x * co.HARTREE_TO_KJMOL for x in energies]
for i, e in enumerate(energies):
temp.append(datatypes.Datum(
val=e,
com='geao',
typ='eao',
src_1=filename,
idx_1=idx_1 + 1,
idx_2=i + 1))
avg = sum([x.val for x in temp]) / len(temp)
for datum in temp:
datum.val -= avg
data.extend(temp)
# MACROMODEL OPTIMIZED ENERGIES RELATIVE TO AVERAGE
filenames_s = coms['meao']
ind = 'opt'
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
name_mae = inps[filename].name_mae
mae = check_outs(name_mae, outs, filetypes.Mae, direc)
indices = inps[filename]._index_output_mae
selected_structures = filetypes.select_structures(
mae.structures, indices, ind)
for idx_2, structure in selected_structures:
temp.append(datatypes.Datum(
val=structure.props['r_mmod_Potential_Energy-MM3*'],
com='meao',
typ='eao',
src_1=inps[filename].name_mae,
idx_1=idx_1 + 1,
idx_2=idx_2 + 1))
avg = sum([x.val for x in temp]) / len(temp)
for datum in temp:
datum.val -= avg
data.extend(temp)
# JAGUAR BONDS
filenames = chain.from_iterable(coms['jb'])
for filename in filenames:
data.extend(collect_structural_data_from_mae(
filename, inps, outs, direc, sub_names, 'jb', 'pre', 'bonds'))
# GAUSSIAN BONDS
filenames = chain.from_iterable(coms['gtb'])
for filename in filenames:
data.extend(collect_structural_data_from_tinker_log_for_gaussian(
filename, inps, outs, direc, 'gtb', 'pre', 'bonds'))
# GAUSSIAN ANGLES
filenames = chain.from_iterable(coms['gta'])
for filename in filenames:
data.extend(collect_structural_data_from_tinker_log_for_gaussian(
filename, inps, outs, direc, 'gta', 'pre', 'angles'))
# GAUSSIAN TORSIONS
filenames = chain.from_iterable(coms['gtt'])
for filename in filenames:
data.extend(collect_structural_data_from_tinker_log_for_gaussian(
filename, inps, outs, direc, 'gtt', 'pre', 'torsions'))
# TINKER SP BONDS
filenames = chain.from_iterable(coms['tb'])
for filename in filenames:
data.extend(collect_structural_data_from_tinker_log(
filename, inps, outs, direc, 'tb', 'pre', 'bonds'))
# TINKER SP ANGLES
filenames = chain.from_iterable(coms['ta'])
for filename in filenames:
data.extend(collect_structural_data_from_tinker_log(
filename, inps, outs, direc, 'ta', 'pre', 'angles'))
# TINKER SP TORSIONS
filenames = chain.from_iterable(coms['tt'])
for filename in filenames:
data.extend(collect_structural_data_from_tinker_log(
filename, inps, outs, direc, 'tt', 'pre', 'torsions'))
# TINKER OPTIMIZED BONDS
filenames = chain.from_iterable(coms['tbo'])
for filename in filenames:
data.extend(collect_structural_data_from_tinker_log(
filename, inps, outs, direc, 'tbo', 'opt', 'bonds'))
    # TINKER OPTIMIZED ANGLES
filenames = chain.from_iterable(coms['tao'])
for filename in filenames:
data.extend(collect_structural_data_from_tinker_log(
filename, inps, outs, direc, 'tao', 'opt', 'angles'))
    # TINKER OPTIMIZED TORSIONS
filenames = chain.from_iterable(coms['tto'])
for filename in filenames:
data.extend(collect_structural_data_from_tinker_log(
filename, inps, outs, direc, 'tto', 'opt', 'torsions'))
# TINKER ENERGIES RELATIVE TO LOWEST
filenames_s = coms['te']
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
temp.append(collect_structural_data_from_tinker_log(
filename, inps, outs, direc, 'te', 'pre', 'e', idx_1 = idx_1))
zero = min([x.val for x in temp])
for datum in temp:
datum.val -= zero
data.extend(temp)
# TINKER ENERGIES RELATIVE TO AVERAGE
filenames_s = coms['tea']
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
temp.append(collect_structural_data_from_tinker_log(
filename, inps, outs, direc, 'tea', 'pre', 'ea', idx_1 = idx_1))
avg = sum([x.val for x in temp]) / len(temp)
for datum in temp:
datum.val -= avg
data.extend(temp)
    # TINKER OPTIMIZED ENERGIES RELATIVE TO LOWEST
filenames_s = coms['teo']
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
temp.append(collect_structural_data_from_tinker_log(
filename, inps, outs, direc, 'teo', 'opt', 'eo', idx_1 = idx_1))
zero = min([x.val for x in temp])
for datum in temp:
datum.val -= zero
data.extend(temp)
# TINKER OPTIMIZED ENERGIES RELATIVE TO AVERAGE
filenames_s = coms['teao']
for idx_1, filenames in enumerate(filenames_s):
temp = []
for filename in filenames:
temp.append(collect_structural_data_from_tinker_log(
filename, inps, outs, direc, 'teao', 'opt', 'eao', idx_1 = idx_1))
avg = sum([x.val for x in temp]) / len(temp)
for datum in temp:
datum.val -= avg
data.extend(temp)
# TINKER HESSIAN
filenames = chain.from_iterable(coms['th'])
for filename in filenames:
xyz_struct = inps[filename].structures[0]
num_atoms = xyz_struct.props['total atoms']
name_hes = inps[filename].name_hes
hes = check_outs(name_hes, outs, filetypes.TinkerHess, direc)
hes.natoms = num_atoms
hess = hes.hessian
datatypes.mass_weight_hessian(hess, xyz_struct.atoms)
        # Need to figure out dummy atoms at some point?
# I'm not even sure if we can use dummy atoms in TINKER.
low_tri_idx = np.tril_indices_from(hess)
low_tri = hess[low_tri_idx]
data.extend([datatypes.Datum(
val=e,
com='th',
typ='h',
src_1=hes.filename,
idx_1=x + 1,
idx_2=y + 1)
for e, x, y in zip(
low_tri, low_tri_idx[0], low_tri_idx[1])])
# TINKER EIGENMATRIX USING GAUSSIAN EIGENVECTORS
filenames = chain.from_iterable(coms['tgeig'])
for comma_filenames in filenames:
name_xyz, name_gau_log = comma_filenames.split(',')
name_xyz_hes = inps[name_xyz].name_hes
xyz = check_outs(name_xyz, outs, filetypes.Tinker_xyz, direc)
xyz_hes = check_outs(name_xyz_hes, outs, filetypes.TinkerHess, direc)
gau_log = check_outs(name_gau_log, outs, filetypes.GaussLog, direc)
xyz_struct = xyz.structures[0]
num_atoms = xyz_struct.props['total atoms']
xyz_hes.natoms = num_atoms
hess = xyz_hes.hessian
datatypes.mass_weight_hessian(hess, xyz_struct.atoms)
evec = gau_log.evecs
try:
eigenmatrix = np.dot(np.dot(evec, hess), evec.T)
except ValueError:
logger.warning('Matrices not aligned!')
logger.warning('Hessian retrieved from {}: {}'.format(
                name_xyz_hes, hess.shape))
logger.warning('Eigenvectors retrieved from {}: {}'.format(
name_gau_log, evec.shape))
raise
low_tri_idx = np.tril_indices_from(eigenmatrix)
low_tri = eigenmatrix[low_tri_idx]
data.extend([datatypes.Datum(
val=e,
com='tgeig',
typ='eig',
src_1=name_xyz,
src_2=name_gau_log,
idx_1=x + 1,
idx_2=y + 1)
for e, x, y in zip(
low_tri, low_tri_idx[0], low_tri_idx[1])])
# MACROMODEL BONDS
filenames = chain.from_iterable(coms['mb'])
for filename in filenames:
data.extend(collect_structural_data_from_mae(
filename, inps, outs, direc, sub_names, 'mb', 'opt', 'bonds'))
# JAGUAR ANGLES
filenames = chain.from_iterable(coms['ja'])
for filename in filenames:
data.extend(collect_structural_data_from_mae(
filename, inps, outs, direc, sub_names, 'ja', 'pre', 'angles'))
    # MACROMODEL ANGLES
filenames = chain.from_iterable(coms['ma'])
for filename in filenames:
data.extend(collect_structural_data_from_mae(
filename, inps, outs, direc, sub_names, 'ma', 'opt', 'angles'))
    # JAGUAR TORSIONS
filenames = chain.from_iterable(coms['jt'])
for filename in filenames:
data.extend(collect_structural_data_from_mae(
filename, inps, outs, direc, sub_names, 'jt', 'pre', 'torsions'))
    # MACROMODEL TORSIONS
filenames = chain.from_iterable(coms['mt'])
for filename in filenames:
data.extend(collect_structural_data_from_mae(
filename, inps, outs, direc, sub_names, 'mt', 'opt', 'torsions'))
# JAGUAR CHARGES
filenames = chain.from_iterable(coms['jq'])
for filename in filenames:
mae = check_outs(filename, outs, filetypes.Mae, direc)
for idx_1, structure in enumerate(mae.structures):
for atom in structure.atoms:
# If it doesn't have the property b_q_use_charge,
# use it.
# If b_q_use_charge is 1, use it. If it's 0, don't
# use it.
if not 'b_q_use_charge' in atom.props or \
atom.props['b_q_use_charge']:
data.append(datatypes.Datum(
val=atom.partial_charge,
com='jq',
typ='q',
src_1=filename,
idx_1=idx_1 + 1,
atm_1=atom.index))
# MACROMODEL CHARGES
filenames = chain.from_iterable(coms['mq'])
for filename in filenames:
name_mae = inps[filename].name_mae
mae = check_outs(name_mae, outs, filetypes.Mae, direc)
# Pick out the right structures. Sometimes our .com files
# generate many structures in a .mae, not all of which
# apply to this command.
structures = filetypes.select_structures(
mae.structures, inps[filename]._index_output_mae, 'pre')
for idx_1, structure in structures:
for atom in structure.atoms:
if not 'b_q_use_charge' in atom.props or \
atom.props['b_q_use_charge']:
data.append(datatypes.Datum(
val=atom.partial_charge,
com='mq',
typ='q',
src_1=filename,
idx_1=idx_1 + 1,
atm_1=atom.index))
    # MACROMODEL+GAUSSIAN ESP
filenames = chain.from_iterable(coms['mgESP'])
for comma_filenames in filenames:
charges_list = []
filename_mae, name_gau_chk = comma_filenames.split(',')
        # Filename of the output *.mae file (e.g. filename.q2mm.mae).
name_mae = inps[filename_mae].name_mae
mae = check_outs(name_mae, outs, filetypes.Mae, direc)
structures = filetypes.select_structures(
mae.structures, inps[filename_mae]._index_output_mae, 'pre')
for idx_1, structure in structures:
for atom in structure.atoms:
### I think we want all the charges, right?
#if not 'b_q_use_charge' in atom.props or \
# atom.props['b_q_use_charge']:
if atom.atomic_num > 0:
charges_list.append(atom.partial_charge)
com_filename = os.path.splitext(name_gau_chk)[0] + '.ESP.q2mm.com'
inps[com_filename].charge_list = charges_list
inps[com_filename].write_com()
inps[com_filename].run_gaussian()
name_gauss_log = inps[com_filename].name_log
gauss = check_outs(name_gauss_log, outs, filetypes.GaussLog, direc)
esp_rms = gauss.esp_rms
if esp_rms < 0.0:
raise Exception('A negative RMS was obtained for the ESP fitting '
                            'which indicates an error occurred. Look at the '
'following file: {}'.format(name_gauss_log))
data.append(datatypes.Datum(
val=esp_rms,
com='mgESP',
typ='esp',
src_1= name_mae,
src_2='gaussian',
idx_1 = 1))
# MACROMODEL+JAGUAR ESP
    ## This does not work, I still need to write code to support Jaguar. -TR
filenames = chain.from_iterable(coms['mjESP'])
for comma_filenames in filenames:
charges_list = []
name_mae, name_jag_chk = comma_filenames.split(',')
mae = check_outs(name_mae, outs, filetypes.Mae, direc)
structures = filetypes.select_structures(
mae.structures, inps[name_mae]._index_output_mae, 'pre')
for idx_1, structure in structures:
for atom in structure.atoms:
if not 'b_q_use_charge' in atom.props or \
atom.props['b_q_use_charge']:
charges_list.append(atom.partial_charge)
###Filler for ESP calculations####
### This is what is used in anna's code
current_RMS = run_ChelpG_inp.run_JCHelpG(charges_list,name_jag_chk)
### End of filler
if current_RMS < 0:
sys.exit("Error while computing RMS. Exiting")
data.append(datatypes.Datum(
val=current_RMS,
com='mjESP',
typ='esp',
src_1=name_mae,
idx_1=1))
# JAGUAR CHARGES EXCLUDING ALIPHATIC HYDROGENS
filenames = chain.from_iterable(coms['jqh'])
for filename in filenames:
mae = check_outs(filename, outs, filetypes.Mae, direc)
for idx_1, structure in enumerate(mae.structures):
aliph_hyds = structure.get_aliph_hyds()
for atom in structure.atoms:
# If it doesn't have the property b_q_use_charge,
# use it.
# If b_q_use_charge is 1, use it. If it's 0, don't
# use it.
if (not 'b_q_use_charge' in atom.props or \
atom.props['b_q_use_charge']) and \
not atom in aliph_hyds:
charge = atom.partial_charge
if atom.atom_type == 3:
for bonded_atom_index in atom.bonded_atom_indices:
bonded_atom = structure.atoms[bonded_atom_index - 1]
if bonded_atom in aliph_hyds:
charge += bonded_atom.partial_charge
data.append(datatypes.Datum(
val=charge,
com='jqh',
typ='qh',
src_1=filename,
idx_1=idx_1 + 1,
atm_1=atom.index))
# MACROMODEL CHARGES EXCLUDING ALIPHATIC HYDROGENS
filenames = chain.from_iterable(coms['mqh'])
for filename in filenames:
name_mae = inps[filename].name_mae
mae = check_outs(name_mae, outs, filetypes.Mae, direc)
# Pick out the right structures. Sometimes our .com files
# generate many structures in a .mae, not all of which
# apply to this command.
structures = filetypes.select_structures(
mae.structures, inps[filename]._index_output_mae, 'pre')
for idx_1, structure in structures:
aliph_hyds = structure.get_aliph_hyds()
for atom in structure.atoms:
if (not 'b_q_use_charge' in atom.props or \
atom.props['b_q_use_charge']) and \
atom not in aliph_hyds:
# Since the charge is always zero AS FAR AS I KNOW, this
# whole recalculation of the charge is totally unnecessary.
# However, I want users to be aware that if a situation
# arises that goes beyond something I experienced,
# uncommenting this section, thereby making it more like the
# code for -jqh, should solve the problem.
# charge = atom.partial_charge
# if atom.atom_type == 3:
# for bonded_atom_index in atom.bonded_atom_indices:
# bonded_atom = structure.atoms[bonded_atom_index - 1]
# if bonded_atom in aliph_hyds:
# charge += bonded_atom.partial_charge
data.append(datatypes.Datum(
# val=charge,
val=atom.partial_charge,
com='mqh',
typ='qh',
src_1=filename,
idx_1=idx_1 + 1,
atm_1=atom.index))
# JAGUAR CHARGES EXCLUDING ALL SINGLE BONDED HYDROGENS
filenames = chain.from_iterable(coms['jqa'])
for filename in filenames:
mae = check_outs(filename, outs, filetypes.Mae, direc)
for idx_1, structure in enumerate(mae.structures):
hyds = structure.get_hyds()
for atom in structure.atoms:
# Check if we want to use this charge and ensure it's not a
# hydrogen.
if (not 'b_q_use_charge' in atom.props or \
atom.props['b_q_use_charge']) and \
atom not in hyds:
charge = atom.partial_charge
# Check if it's bonded to a hydrogen.
for bonded_atom_index in atom.bonded_atom_indices:
bonded_atom = structure.atoms[bonded_atom_index - 1]
if bonded_atom in hyds:
if len(bonded_atom.bonded_atom_indices) < 2:
charge += bonded_atom.partial_charge
data.append(datatypes.Datum(
val=charge,
com='jqa',
typ='qa',
src_1=filename,
idx_1=idx_1 + 1,
atm_1=atom.index))
# MACROMODEL CHARGES EXCLUDING ALL SINGLE BONDED HYDROGENS
filenames = chain.from_iterable(coms['mqa'])
for filename in filenames:
name_mae = inps[filename].name_mae
mae = check_outs(name_mae, outs, filetypes.Mae, direc)
# Pick out the right structures. Sometimes our .com files
# generate many structures in a .mae, not all of which
# apply to this command.
structures = filetypes.select_structures(
mae.structures, inps[filename]._index_output_mae, 'pre')
for idx_1, structure in structures:
hyds = structure.get_hyds()
for atom in structure.atoms:
if (not 'b_q_use_charge' in atom.props or \
atom.props['b_q_use_charge']) and \
atom not in hyds:
charge = atom.partial_charge
for bonded_atom_index in atom.bonded_atom_indices:
bonded_atom = structure.atoms[bonded_atom_index - 1]
if bonded_atom in hyds:
if len(bonded_atom.bonded_atom_indices) < 2:
charge += bonded_atom.partial_charge
data.append(datatypes.Datum(
val=charge,
com='mqa',
typ='qa',
src_1=filename,
idx_1=idx_1 + 1,
atm_1=atom.index))
# JAGUAR HESSIAN
filenames = chain.from_iterable(coms['jh'])
for filename in filenames:
jin = check_outs(filename, outs, filetypes.JaguarIn, direc)
hess = jin.hessian
datatypes.mass_weight_hessian(hess, jin.structures[0].atoms)
if invert:
evals, evecs = np.linalg.eigh(hess)
datatypes.replace_minimum(evals, value=invert)
hess = evecs.dot(np.diag(evals).dot(evecs.T))
datatypes.replace_minimum(hess, value=invert)
low_tri_idx = np.tril_indices_from(hess)
low_tri = hess[low_tri_idx]
data.extend([datatypes.Datum(
val=e,
com='jh',
typ='h',
src_1=jin.filename,
idx_1=x + 1,
idx_2=y + 1)
for e, x, y in zip(
low_tri, low_tri_idx[0], low_tri_idx[1])])
# GAUSSIAN HESSIAN
filenames = chain.from_iterable(coms['gh'])
for filename in filenames:
log = check_outs(filename, outs, filetypes.GaussLog, direc)
log.read_archive()
# For now, the Hessian is stored on the structures inside the filetype.
hess = log.structures[0].hess
datatypes.mass_weight_hessian(hess, log.structures[0].atoms)
if invert:
# Faster to use scipy.linalg.eig or scipy.linalg.eigsh (even
# faster).
evals, evecs = np.linalg.eigh(hess)
# Returns True.
# print(np.allclose(evecs.dot(np.diag(evals).dot(evecs.T)), hess))
datatypes.replace_minimum(evals, value=invert)
hess = evecs.dot(np.diag(evals).dot(evecs.T))
# Oh crap, just realized this probably needs to be mass weighted.
# WARNING: This option may need to be mass weighted!
low_tri_idx = np.tril_indices_from(hess)
low_tri = hess[low_tri_idx]
data.extend([datatypes.Datum(
val=e,
com='gh',
typ='h',
src_1=log.filename,
idx_1=x + 1,
idx_2=y + 1)
for e, x, y in zip(
low_tri, low_tri_idx[0], low_tri_idx[1])])
# MACROMODEL HESSIAN
filenames = chain.from_iterable(coms['mh'])
for filename in filenames:
# Get the .log for the .mae.
name_log = inps[filename].name_log
# Used to get dummy atoms.
mae = check_outs(filename, outs, filetypes.Mae, direc)
# Used to get the Hessian.
log = check_outs(name_log, outs, filetypes.MacroModelLog, direc)
hess = log.hessian
dummies = mae.structures[0].get_dummy_atom_indices()
hess_dummies = datatypes.get_dummy_hessian_indices(dummies)
hess = datatypes.check_mm_dummy(hess, hess_dummies)
low_tri_idx = np.tril_indices_from(hess)
low_tri = hess[low_tri_idx]
data.extend([datatypes.Datum(
val=e,
com='mh',
typ='h',
src_1=mae.filename,
idx_1=x + 1,
idx_2=y + 1)
for e, x, y in zip(
low_tri, low_tri_idx[0], low_tri_idx[1])])
# JAGUAR EIGENMATRIX
filenames = chain.from_iterable(coms['jeigz'])
for comma_sep_filenames in filenames:
name_in, name_out = comma_sep_filenames.split(',')
jin = check_outs(name_in, outs, filetypes.JaguarIn, direc)
out = check_outs(name_out, outs, filetypes.JaguarOut, direc)
hess = jin.hessian
evec = out.eigenvectors
datatypes.mass_weight_hessian(hess, jin.structures[0].atoms)
datatypes.mass_weight_eigenvectors(evec, out.structures[0].atoms)
try:
eigenmatrix = np.dot(np.dot(evec, hess), evec.T)
except ValueError:
logger.warning('Matrices not aligned!')
logger.warning('Hessian retrieved from {}: {}'.format(
name_in, hess.shape))
logger.warning('Eigenvectors retrieved from {}: {}'.format(
name_out, evec.shape))
raise
# Funny way to make off-diagonal elements zero.
# eigenmatrix = np.diag(np.diag(eigenmatrix))
# Take diagonal into one dimensional array.
eigenmatrix = np.diag(eigenmatrix)
if invert:
datatypes.replace_minimum(eigenmatrix, value=invert)
# Turn back into a full matrix.
eigenmatrix = np.diag(eigenmatrix)
low_tri_idx = np.tril_indices_from(eigenmatrix)
low_tri = eigenmatrix[low_tri_idx]
data.extend([datatypes.Datum(
val=e,
com='jeigz',
typ='eig',
src_1=jin.filename,
src_2=out.filename,
idx_1=x + 1,
idx_2=y + 1)
for e, x, y in zip(
low_tri, low_tri_idx[0], low_tri_idx[1])])
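    # The projection above, eigenmatrix = evec . hess . evec^T, re-expresses a
    # Hessian in the basis of a set of reference eigenvectors. When the
    # Hessian and eigenvectors come from the same QM calculation (as in
    # 'jeigz'), the result is essentially diagonal, which is why only the
    # diagonal is kept here; for the mixed commands ('mjeig', 'mgeig',
    # 'tgeig'), off-diagonal elements measure how far the FF Hessian deviates
    # from the reference normal modes.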
# GAUSSIAN EIGENMATRIX
filenames = chain.from_iterable(coms['geigz'])
for filename in filenames:
log = check_outs(filename, outs, filetypes.GaussLog, direc)
evals = log.evals * co.HESSIAN_CONVERSION
if invert:
datatypes.replace_minimum(evals, value=invert)
eigenmatrix = np.diag(evals)
low_tri_idx = np.tril_indices_from(eigenmatrix)
low_tri = eigenmatrix[low_tri_idx]
data.extend([datatypes.Datum(
val=e,
com='geigz',
typ='eig',
src_1=log.filename,
idx_1=x + 1,
idx_2=y + 1)
for e, x, y in zip(
low_tri, low_tri_idx[0], low_tri_idx[1])])
# MACROMODEL EIGENMATRIX USING JAGUAR EIGENVECTORS
filenames = chain.from_iterable(coms['mjeig'])
for comma_sep_filenames in filenames:
name_mae, name_out = comma_sep_filenames.split(',')
name_log = inps[name_mae].name_log
mae = check_outs(name_mae, outs, filetypes.Mae, direc)
log = check_outs(name_log, outs, filetypes.MacroModelLog, direc)
out = check_outs(name_out, outs, filetypes.JaguarOut, direc)
hess = log.hessian
dummies = mae.structures[0].get_dummy_atom_indices()
hess_dummies = datatypes.get_dummy_hessian_indices(dummies)
hess = datatypes.check_mm_dummy(hess, hess_dummies)
evec = out.eigenvectors
datatypes.mass_weight_eigenvectors(evec, out.structures[0].atoms)
try:
eigenmatrix = np.dot(np.dot(evec, hess), evec.T)
except ValueError:
logger.warning('Matrices not aligned!')
logger.warning('Hessian retrieved from {}: {}'.format(
log.filename, hess.shape))
logger.warning('Eigenvectors retrieved from {}: {}'.format(
name_out, evec.shape))
raise
low_tri_idx = np.tril_indices_from(eigenmatrix)
low_tri = eigenmatrix[low_tri_idx]
data.extend([datatypes.Datum(
val=e,
com='mjeig',
typ='eig',
src_1=mae.filename,
src_2=out.filename,
idx_1=x + 1,
idx_2=y + 1)
for e, x, y in zip(
low_tri, low_tri_idx[0], low_tri_idx[1])])
# MACROMODEL EIGENMATRIX USING GAUSSIAN EIGENVECTORS
filenames = chain.from_iterable(coms['mgeig'])
for comma_filenames in filenames:
name_mae, name_gau_log = comma_filenames.split(',')
name_mae_log = inps[name_mae].name_log
mae = check_outs(name_mae, outs, filetypes.Mae, direc)
mae_log = check_outs(name_mae_log, outs, filetypes.MacroModelLog, direc)
gau_log = check_outs(name_gau_log, outs, filetypes.GaussLog, direc)
hess = mae_log.hessian
dummies = mae.structures[0].get_dummy_atom_indices()
hess_dummies = datatypes.get_dummy_hessian_indices(dummies)
hess = datatypes.check_mm_dummy(hess, hess_dummies)
evec = gau_log.evecs
try:
eigenmatrix = np.dot(np.dot(evec, hess), evec.T)
except ValueError:
logger.warning('Matrices not aligned!')
logger.warning('Hessian retrieved from {}: {}'.format(
name_mae_log, hess.shape))
logger.warning('Eigenvectors retrieved from {}: {}'.format(
name_gau_log, evec.shape))
raise
low_tri_idx = np.tril_indices_from(eigenmatrix)
low_tri = eigenmatrix[low_tri_idx]
data.extend([datatypes.Datum(
val=e,
com='mgeig',
typ='eig',
src_1=name_mae,
src_2=name_gau_log,
idx_1=x + 1,
idx_2=y + 1)
for e, x, y in zip(
low_tri, low_tri_idx[0], low_tri_idx[1])])
logger.log(15, 'TOTAL DATA POINTS: {}'.format(len(data)))
return np.array(data, dtype=datatypes.Datum)
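# A self-contained sketch (not used by collect_data) of the energy summation
# done for the Gaussian 'ge' style commands above. The archive strings below
# are hypothetical and stand in for log.structures[0].props.
def _example_sum_gaussian_energies():
    # Pretend co.GAUSSIAN_ENERGIES is ['HF', 'ZeroPoint'] and the Gaussian
    # archive reported two structures in one log file.
    props = {'HF': '0.634,0.2352', 'ZeroPoint': '0.01234,0.0164'}
    things_to_add = []
    for thing_label in ['HF', 'ZeroPoint']:
        thing = props[thing_label]
        if ',' in thing:
            thing = [float(x) for x in thing.split(',')]
        else:
            thing = [float(thing)]
        things_to_add.append(thing)
    energies = [0.] * len(things_to_add[0])
    for thing_group in things_to_add:
        for i, thing in enumerate(thing_group):
            energies[i] += thing
    # energies is now [0.634 + 0.01234, 0.2352 + 0.0164] in hartree; the real
    # code then converts to kJ/mol with co.HARTREE_TO_KJMOL.
    return energies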
def collect_data_fake(coms, inps, direc='.', sub_names=['OPT']):
"""
Generates a random data set quickly.
"""
import random
data = []
filenames = flatten(coms.values())
for idx_1, filename in enumerate(filenames):
for idx_2 in range(5):
data.append(datatypes.Datum(
val=random.uniform(0, 10),
com='rand',
typ='a',
src_1=filename,
idx_1=idx_1 + 1,
idx_2=idx_2 + 1))
return np.array(data, dtype=datatypes.Datum)
def flatten(l):
"""
Simple means to flatten an irregular list of lists.
http://stackoverflow.com/questions/2158395/
flatten-an-irregular-list-of-lists-in-python
This goes a bit further than chain.from_iterable in that it can deal with
an arbitrary number of nested lists.
"""
    # Move this?
    # collections.abc is required on Python 3.10+, where collections.Iterable
    # was removed.
    import collections.abc
    for el in l:
        if isinstance(el, collections.abc.Iterable) and \
           not isinstance(el, str):
for sub in flatten(el):
yield sub
else:
yield el
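# A quick illustration (not called anywhere) of flatten() on an irregular
# nested list of hypothetical filenames.
def _example_flatten():
    nested = ['a.mae', ['b.mae', ['c.in', 'd.out']], 'e.log']
    flat = list(flatten(nested))
    # flat == ['a.mae', 'b.mae', 'c.in', 'd.out', 'e.log']
    return flat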
def collect_structural_data_from_mae(
name_mae, inps, outs, direc, sub_names, com, ind, typ):
"""
Repeated code used to extract structural data from .mae files (through
the generation of .mmo files).
Would be nice to reduce the number of arguments. The problem here is in
carrying through data for the generation of the Datum object.
Not going to write a pretty __doc__ for this since I want to make so
many changes. These changes will likely go along with modifications
to the classes inside filetypes.
"""
data = []
name_mmo = inps[name_mae].name_mmo
    # The indices are just a list indicating which calculation was done,
    # 'pre' or 'opt'.
indices = inps[name_mae]._index_output_mmo
mmo = check_outs(name_mmo, outs, filetypes.MacroModel, direc)
selected_structures = filetypes.select_structures(
mmo.structures, indices, ind)
for idx_1, structure in selected_structures:
data.extend(structure.select_data(
typ,
com=com,
com_match=sub_names,
src_1=mmo.filename,
idx_1=idx_1 + 1))
return data
# Added by Tony.
# Probably want to use the check_outs function at some point.
def collect_structural_data_from_tinker_log(
name_xyz, inps, outs, direc, com, ind, typ, idx_1 = None):
select_struct = {'pre':0, 'opt':1}
data = []
name_log = inps[name_xyz].name_log
log = check_outs(name_log, outs, filetypes.TinkerLog, direc)
log_structure = log.structures
struct = log_structure[select_struct[ind]]
# Stuff to try out hessian.
# xyz_struct = xyz_structure[0]
# num_atoms = xyz_struct.props['total atoms']
# hes_structure = inps[name_xyz].hess
# hes_structure.natoms = num_atoms
# hessian = hes_structure.hessian()
# Stuff to try out hessian.
if com in ['te','teo','tea','teao']:
energy = struct.props['energy']
new_datum = (datatypes.Datum(
val=energy,
typ=typ,
src_1=name_log,
idx_1=idx_1 + 1))
return(new_datum)
else:
data.extend(struct.select_data(
typ,
com=com,
src_1=name_log))
return(data)
def collect_structural_data_from_tinker_log_for_gaussian(
name_xyz, inps, outs, direc, com, ind, typ, idx_1 = None):
select_struct = {'pre':0, 'opt':1}
data = []
name_log = inps[name_xyz].name_log
log = check_outs(name_log, outs, filetypes.TinkerLog, direc)
log_structure = log.structures
struct = log_structure[select_struct[ind]]
data.extend(struct.select_data(
typ,
com=com,
src_1=name_log))
return(data)
def collect_structural_data_from_amber_geo(
name_xyz, inps, outs, direc, com, ind, typ, idx_1 = None):
select_struct = {'pre':0, 'opt':1}
data = []
name_geo = inps[name_xyz].name_geo
log = check_outs(name_geo, outs, filetypes.AmberGeo, direc) # returns classtype
log_structure = log.structures
struct = None
if len(inps) == 1:
struct = log_structure[0]
else:
struct = log_structure[select_struct[ind]]
data.extend(struct.select_data(
typ,
com=com,
src_1=name_geo))
return(data)
def collect_structural_data_from_amber_ene(
name_xyz, inps, outs, direc, com, ind, typ, idx_1 = None):
    # If only a single input is given, there is only one structure, so the
    # 'pre'/'opt' selection below cannot be applied.
select_struct = {'pre':0, 'opt':1}
data = []
name_ene = inps[name_xyz].name_ene
log = check_outs(name_ene, outs, filetypes.AmberEne, direc) # returns classtype
log_structure = log.structures
struct = None
if len(inps) == 1:
struct = log_structure[0]
else:
struct = log_structure[select_struct[ind]]
if com in ['ae','aeo','aea','aeao','ae1','ae1o']:
energy = struct.props['energy']
new_datum = (datatypes.Datum(
val=energy,
typ=typ,
src_1=name_ene,
idx_1=idx_1 + 1))
return(new_datum)
else:
data.extend(struct.select_data(
typ,
com=com,
src_1=name_ene))
return(data)
def sort_commands_by_filename(commands):
'''
Takes a dictionary of commands like...
{'me': [['a1.01.mae', 'a2.01.mae', 'a3.01.mae'],
['b1.01.mae', 'b2.01.mae']],
'mb': [['a1.01.mae'], ['b1.01.mae']],
'jeig': [['a1.01.in,a1.out', 'b1.01.in,b1.out']]
}
... and turn it into a dictionary that looks like...
{'a1.01.mae': ['me', 'mb'],
'a1.01.in': ['jeig'],
'a1.out': ['jeig'],
'a2.01.mae': ['me'],
'a3.01.mae': ['me'],
'b1.01.mae': ['me', 'mb'],
'b1.01.in': ['jeig'],
'b1.out': ['jeig'],
'b2.01.mae': ['me']
}
Arguments
---------
commands : dic
Returns
-------
dictionary of the sorted commands
'''
sorted_commands = {}
for command, groups_filenames in commands.items():
for comma_separated in chain.from_iterable(groups_filenames):
for filename in comma_separated.split(','):
if filename in sorted_commands:
sorted_commands[filename].append(command)
else:
sorted_commands[filename] = [command]
return sorted_commands
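# A small sketch (hypothetical filenames) of sort_commands_by_filename in use.
def _example_sort_commands_by_filename():
    commands = {'me': [['a1.01.mae', 'a2.01.mae']],
                'jeig': [['a1.01.in,a1.out']]}
    sorted_commands = sort_commands_by_filename(commands)
    # sorted_commands == {'a1.01.mae': ['me'],
    #                     'a2.01.mae': ['me'],
    #                     'a1.01.in': ['jeig'],
    #                     'a1.out': ['jeig']}
    return sorted_commands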
# Will also have to be updated. Maybe the Datum class too and how it responds
# to assigning labels.
## Why is this here? Is this deprecated? -Tony
def read_reference(filename):
data = []
with open(filename, 'r') as f:
for line in f:
# Skip certain lines.
if line.startswith('-'):
continue
# Remove everything following a # in a line.
line = line.partition('#')[0]
cols = line.split()
# There should always be 3 columns.
if len(cols) == 3:
lbl, wht, val = cols
datum = datatypes.Datum(lbl=lbl, wht=float(wht), val=float(val))
lbl_to_data_attrs(datum, lbl)
data.append(datum)
    # list.sort() sorts in place and returns None, so don't rebind data here.
    data.sort(key=datatypes.datum_sort_key)
return np.array(data)
## This is also part of the read_reference function above, but I think these
## labels and attributes are important for handling data.
# Shouldn't be necessary anymore.
# This should be based on the datum type and not the length of the parts list.
def lbl_to_data_attrs(datum, lbl):
parts = lbl.split('_')
datum.typ = parts[0]
# if len(parts) == 3:
if datum.typ in ['e','eo','ea','eao','eig','h','q','qh','qa']:
idxs = parts[-1]
# if len(parts) == 4:
if datum.typ in ['b','t','a']:
idxs = parts[-2]
atm_nums = parts[-1]
atm_nums = atm_nums.split('-')
for i, atm_num in enumerate(atm_nums):
setattr(datum, 'atm_{}'.format(i+1), int(atm_num))
if datum.typ in ['p']:
datum.src_1 = parts[1]
idxs = parts[-1]
if datum.typ in ['esp']:
datum.src_1 = parts[1]
idxs = parts[-1]
idxs = idxs.split('-')
datum.idx_1 = int(idxs[0])
if len(idxs) == 2:
        datum.idx_2 = int(idxs[1])
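# A brief sketch (hypothetical label) of how lbl_to_data_attrs fills in a
# Datum from its label string.
def _example_lbl_to_data_attrs():
    datum = datatypes.Datum(val=1.234)
    # An energy label of type 'e' for group 1, structure 2.
    lbl_to_data_attrs(datum, 'e_1-2')
    # datum.typ == 'e', datum.idx_1 == 1, datum.idx_2 == 2
    return datum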
# Right now, this only looks good if the logger doesn't append each log
# message with something (module, date/time, etc.).
# It would be great if this output looked good regardless of the settings
# used for the logger.
# That goes for all of these pretty output functions that use TextWrapper.
def pretty_commands_for_files(commands_for_files, log_level=5):
"""
    Logs the commands-by-filename dictionary, i.e. all of the commands
    used on a particular file.
Arguments
---------
commands_for_files : dic
log_level : int
"""
if logger.getEffectiveLevel() <= log_level:
foobar = TextWrapper(
width=48, subsequent_indent=' '*26)
logger.log(
log_level,
'--' + ' FILENAME '.center(22, '-') +
'--' + ' COMMANDS '.center(22, '-') +
'--')
for filename, commands in commands_for_files.items():
foobar.initial_indent = ' {:22s} '.format(filename)
logger.log(log_level, foobar.fill(' '.join(commands)))
logger.log(log_level, '-'*50)
def pretty_all_commands(commands, log_level=5):
"""
Logs the arguments/commands given to calculate that are used
to request particular datatypes from particular files.
Arguments
---------
commands : dic
log_level : int
"""
if logger.getEffectiveLevel() <= log_level:
foobar = TextWrapper(width=48, subsequent_indent=' '*24)
logger.log(log_level, '')
logger.log(
log_level,
'--' + ' COMMAND '.center(9, '-') +
'--' + ' GROUP # '.center(9, '-') +
'--' + ' FILENAMES '.center(24, '-') +
'--')
for command, groups_filenames in commands.items():
for i, filenames in enumerate(groups_filenames):
if i == 0:
foobar.initial_indent = \
' {:9s} {:^9d} '.format(command, i+1)
else:
foobar.initial_indent = \
' ' + ' '*9 + ' ' + '{:^9d} '.format(i+1)
logger.log(log_level, foobar.fill(' '.join(filenames)))
logger.log(log_level, '-'*50)
def pretty_data(data, log_level=20):
"""
Logs data as a table.
Arguments
---------
data : list of Datum
log_level : int
"""
# Really, this should check every data point instead of only the 1st.
if not data[0].wht:
compare.import_weights(data)
if log_level:
string = ('--' + ' LABEL '.center(22, '-') +
'--' + ' WEIGHT '.center(22, '-') +
'--' + ' VALUE '.center(22, '-') +
'--')
logger.log(log_level, string)
for d in data:
if d.wht or d.wht == 0:
string = (' ' + '{:22s}'.format(d.lbl) +
' ' + '{:22.4f}'.format(d.wht) +
' ' + '{:22.4f}'.format(d.val))
else:
string = (' ' + '{:22s}'.format(d.lbl) +
' ' + '{:22.4f}'.format(d.val))
if log_level:
logger.log(log_level, string)
else:
print(string)
if log_level:
logger.log(log_level, '-' * 50)
if __name__ == '__main__':
logging.config.dictConfig(co.LOG_SETTINGS)
main(sys.argv[1:])
|
ericchansen/q2mm
|
q2mm/calculate.py
|
Python
|
mit
| 100,623
|
[
"Amber",
"Gaussian",
"Jaguar",
"MacroModel",
"TINKER"
] |
bcbecab89f96548890f5675461e23d67f336d5ed5657a6ede3360f21d7789850
|
import os
import py
from click.testing import CliRunner
from logstapo.cli import main
TEST_MAIL_BODY = '''
Logstapo results for 'syslog'
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
Unusual lines
-------------
Dec 28 01:30:12 hydra login[3728]: pam_unix(login:session): session opened for user root by LOGIN(uid=0)
Dec 28 01:30:12 hydra login[3766]: ROOT LOGIN on '/dev/tty1'
Jan 9 23:57:31 hydra root[21527]: test
Dec 28 01:29:46 hydra sshd[3702]: Server listening on 0.0.0.0 port 22.
Dec 28 01:29:46 hydra sshd[3702]: Server listening on :: port 22.
Dec 28 01:57:53 hydra sshd[3795]: Accepted publickey for root from fe80::123 port 45459 ssh2: RSA SHA256:whatever
Dec 28 02:49:00 hydra su[9915]: Successful su for root by root
Dec 28 02:49:00 hydra su[9915]: + /dev/pts/3 root:root
Dec 28 02:49:00 hydra su[9915]: pam_unix(su:session): session opened for user root by (uid=0)
Jan 10 01:48:26 hydra test: meow!
Dec 28 02:19:53 hydra useradd[18048]: new user: name=ntp, UID=123, GID=123, home=/dev/null, shell=/sbin/nologin
'''.strip()
TEST_MAIL_BODY_2 = '''
Logstapo results for 'kernel'
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
Unusual lines
-------------
Jan 17 00:25:00 hydra kernel: [1220303.475530] e1000e: lan0 NIC Link is Down
Jan 17 00:25:01 hydra kernel: [1220306.643383] e1000e: lan0 NIC Link is Up 1000 Mbps Full Duplex, Flow Control: Rx/Tx
'''.strip()
TEST_LOG_APPEND = '''
Jan 17 00:25:00 hydra kernel: [1220303.475530] e1000e: lan0 NIC Link is Down
Jan 17 00:25:01 hydra kernel: [1220306.643383] e1000e: lan0 NIC Link is Up 1000 Mbps Full Duplex, Flow Control: Rx/Tx
'''.strip()
def test_logstapo(tmpdir, smtpserver):
testdir = py.path.local(os.path.dirname(__file__))
logdir = tmpdir.join('logs')
logdir.mkdir()
for file in testdir.join('logs').visit():
file.copy(logdir)
config = tmpdir.join('logstapo.yml')
config_yaml = (testdir.join('logstapo_test.yml').read_text('ascii')
.replace('$LOGDIR', logdir.strpath)
.replace('$SMTP_HOST', smtpserver.addr[0])
.replace('$SMTP_PORT', str(smtpserver.addr[1])))
config.write(config_yaml)
runner = CliRunner()
rv = runner.invoke(main, ['-c', config.strpath], catch_exceptions=False)
assert not rv.output
assert rv.exit_code == 0
assert len(smtpserver.outbox) == 1
mail = smtpserver.outbox.pop()
assert mail['Subject'] == 'Found unusual log entries'
assert mail['From'] == 'root@example.com'
assert mail['To'] == 'admins@example.com, root@example.com'
assert mail.get_payload() == TEST_MAIL_BODY
# write something new to the log
logdir.join('kernel.log').write(TEST_LOG_APPEND + '\n', 'a')
# second run, should cause another email with the new line
rv = runner.invoke(main, ['-c', config.strpath], catch_exceptions=False)
assert not rv.output
assert rv.exit_code == 0
assert len(smtpserver.outbox) == 1
mail = smtpserver.outbox.pop()
assert mail.get_payload() == TEST_MAIL_BODY_2
# run again - no new lines -> no emails
rv = runner.invoke(main, ['-c', config.strpath], catch_exceptions=False)
assert not rv.output
assert rv.exit_code == 0
assert len(smtpserver.outbox) == 0
|
ThiefMaster/logstapo
|
tests/test_logstapo.py
|
Python
|
mit
| 3,208
|
[
"VisIt"
] |
f7e5219d271cd0b7819f298c183b0a55cace86476286e234e23dd20c5d51cdc2
|
from __future__ import absolute_import
import sys
from subprocess import call, check_output
sys.path.append("../lib")
sys.path.append("../workers")
from flask import Flask, request, jsonify, send_file
import idigbio
import uuid
from database import Database, Sequence, Result
from align.align import pipeline
app = Flask(__name__)
#http://flask.pocoo.org/snippets/56/
from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
@app.route('/tree/build', methods=["GET", "POST"])
@crossdomain(origin='*')
def tree_build():
opts = {}
# Hardcoded options, potentially expose
opts["data_file"] = "data.nex"
opts["seq_type"] = "dna"
opts["fields"] = ["uuid"]
opts["sort"] = ["uuid"]
opts["rq"] = request.args.get("rq")
opts["limit"] = request.args.get("limit", 10)
# Generate a uuid job id
opts["job_id"] = str(uuid.uuid4())
idb = idigbio.json()
results = idb.search_records(rq=opts["rq"], limit=opts["limit"],
fields=opts["fields"], sort=opts["sort"])
idb_uuids = []
for rec in results["items"]:
idb_uuids.append(rec["indexTerms"]["uuid"])
db = Database()
opts["raw_seqs"] = {}
for seq in db.sess.query(Sequence).filter(Sequence.idb_uuid.in_(idb_uuids)).filter(Sequence.can_use == True):
# The "-" char messes up MrBayes even in the taxon name string field.
# Change that here and it will percolate through the output without
# affecting the data sources on the front end.
opts["raw_seqs"][seq.idb_uuid.replace("-", "_")] = seq.seq
pipeline.delay(opts)
return jsonify({"job_id": opts["job_id"], "raw_seqs": opts["raw_seqs"], "rq": opts["rq"]})
@app.route('/tree/view/<string:job_id>')
@crossdomain(origin='*')
def tree_view(job_id, methods=["GET", "POST"]):
db = Database()
trees = db.sess.query(Result).\
filter(Result.job_id==job_id).\
filter(Result.prog=="mrbayes").first()
if trees is not None:
return jsonify({"job_id": job_id, "tree": trees.result, "status":"done"})
else:
return jsonify({"job_id": job_id, "tree": "", "status": "pending"})
@app.route('/tree/render/<string:job_id>')
@crossdomain(origin='*')
def tree_render(job_id, methods=["GET", "POST"]):
out_fn = "/tmp/image.svg"
db = Database()
trees = db.sess.query(Result).\
filter(Result.job_id==job_id).\
filter(Result.prog=="mrbayes").first()
aligned = db.sess.query(Result).\
filter(Result.job_id==job_id).\
filter(Result.prog=="clustalo").first()
if trees is not None:
# Convert NEXUS tree to Newick format with BioPerl script, was the only
# thing I could find that would parse a MrBayes tree file.
t = check_output("echo '{0}' | /usr/bin/bp_nexus2nh".format(trees.result), shell=True)
s = aligned.result
# Create tree, have to use a process call to run PhyloTree.render()
# inside an X framebuffer since it uses Qt4 to render.
# https://github.com/jhcepas/ete/issues/101
call("/usr/bin/xvfb-run bin/ete_render.py '{0}' '{1}' '{2}'".format(t, s, out_fn), shell=True)
return send_file(out_fn)
else:
return jsonify({"status":False})
if __name__ == '__main__':
app.debug = True
app.run(host="0.0.0.0", port=8080)
|
mjcollin/idigphylo
|
api/api.py
|
Python
|
mit
| 4,984
|
[
"BioPerl"
] |
592fc2c39bcaf1796a00de6b96ec78a833c17de375715ee2523e150cb33b9f8c
|
../../../../share/pyshared/orca/eventsynthesizer.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/eventsynthesizer.py
|
Python
|
gpl-3.0
| 51
|
[
"ORCA"
] |
0a51ed17367e8f0b93dd283c2ba70a66aa2c2d0cc958dec14ed0c7846e2f25ca
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
import numpy
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf import dft
from pyscf import fci
from pyscf import mcscf
b = 1.4
mol = gto.M(
verbose = 5,
output = '/dev/null',
atom = [
['N',( 0.000000, 0.000000, -b/2)],
['N',( 0.000000, 0.000000, b/2)], ],
basis = {'N': '631g', },
)
m = scf.RHF(mol)
m.conv_tol = 1e-10
m.scf()
mc0 = mcscf.CASSCF(m, 4, 4).run()
molsym = gto.M(
verbose = 5,
output = '/dev/null',
atom = [
['N',( 0.000000, 0.000000, -b/2)],
['N',( 0.000000, 0.000000, b/2)], ],
basis = {'N': '631g', },
symmetry = True
)
msym = scf.RHF(molsym)
msym.conv_tol = 1e-10
msym.scf()
def tearDownModule():
global mol, molsym, m, msym, mc0
mol.stdout.close()
molsym.stdout.close()
del mol, molsym, m, msym, mc0
class KnownValues(unittest.TestCase):
def test_with_x2c_scanner(self):
mc1 = mcscf.CASSCF(m, 4, 4).x2c().run()
self.assertAlmostEqual(mc1.e_tot, -108.91497905985173, 7)
mc1 = mcscf.CASSCF(m, 4, 4).x2c().as_scanner().as_scanner()
mc1(mol)
self.assertAlmostEqual(mc1.e_tot, -108.91497905985173, 7)
mc1('N 0 0 0; N 0 0 1.1')
self.assertAlmostEqual(mc1.e_tot, -109.02535605303684, 7)
def test_mc1step_symm_with_x2c_scanner(self):
mc1 = mcscf.CASSCF(msym, 4, 4).x2c().run()
self.assertAlmostEqual(mc1.e_tot, -108.91497905985173, 7)
mc1 = mcscf.CASSCF(msym, 4, 4).x2c().as_scanner().as_scanner()
mc1(molsym)
self.assertAlmostEqual(mc1.e_tot, -108.91497905985173, 7)
mc1('N 0 0 0; N 0 0 1.1')
self.assertAlmostEqual(mc1.e_tot, -109.02535605303684, 7)
def test_0core_0virtual(self):
mol = gto.M(atom='He', basis='321g')
mf = scf.RHF(mol).run()
mc1 = mcscf.CASSCF(mf, 2, 2).run()
self.assertAlmostEqual(mc1.e_tot, -2.850576699649737, 9)
mc1 = mcscf.CASSCF(mf, 1, 2).run()
self.assertAlmostEqual(mc1.e_tot, -2.8356798736405673, 9)
mc1 = mcscf.CASSCF(mf, 1, 0).run()
self.assertAlmostEqual(mc1.e_tot, -2.8356798736405673, 9)
mc1 = mcscf.CASSCF(mf, 2, 2)
mc1.mc2step()
self.assertAlmostEqual(mc1.e_tot, -2.850576699649737, 9)
mc1 = mcscf.CASSCF(mf, 1, 2)
mc1.mc2step()
self.assertAlmostEqual(mc1.e_tot, -2.8356798736405673, 9)
mc1 = mcscf.CASSCF(mf, 1, 0)
mc1.mc2step()
self.assertAlmostEqual(mc1.e_tot, -2.8356798736405673, 9)
def test_cas_natorb(self):
mc1 = mcscf.CASSCF(msym, 4, 4, ncore=5)
mo = mc1.sort_mo([4,5,10,13])
mc1.sorting_mo_energy = True
mc1.kernel(mo)
mo0 = mc1.mo_coeff
ci0 = mc1.ci
self.assertAlmostEqual(mc1.e_tot, -108.7288793597413, 8)
casdm1 = mc1.fcisolver.make_rdm1(mc1.ci, 4, 4)
mc1.ci = None # Force cas_natorb_ to recompute CI coefficients
mc1.cas_natorb_(casdm1=casdm1, eris=mc1.ao2mo())
mo1 = mc1.mo_coeff
ci1 = mc1.ci
s = numpy.einsum('pi,pq,qj->ij', mo0[:,5:9], msym.get_ovlp(), mo1[:,5:9])
self.assertAlmostEqual(fci.addons.overlap(ci0, ci1, 4, 4, s), 1, 9)
def test_get_h2eff(self):
mc1 = mcscf.CASSCF(m, 4, 4).approx_hessian()
eri1 = mc1.get_h2eff(m.mo_coeff[:,5:9])
eri2 = mc1.get_h2cas(m.mo_coeff[:,5:9])
self.assertAlmostEqual(abs(eri1-eri2).max(), 0, 12)
mc1 = mcscf.density_fit(mcscf.CASSCF(m, 4, 4))
eri1 = mc1.get_h2eff(m.mo_coeff[:,5:9])
eri2 = mc1.get_h2cas(m.mo_coeff[:,5:9])
self.assertTrue(abs(eri1-eri2).max() > 1e-5)
def test_get_veff(self):
mf = m.view(dft.rks.RKS)
mc1 = mcscf.CASSCF(mf, 4, 4)
nao = mol.nao_nr()
dm = numpy.random.random((nao,nao))
dm = dm + dm.T
veff1 = mc1.get_veff(mol, dm)
veff2 = m.get_veff(mol, dm)
self.assertAlmostEqual(abs(veff1-veff2).max(), 0, 12)
def test_state_average(self):
mc1 = mcscf.CASSCF(m, 4, 4).state_average_((0.5,0.5))
mc1.natorb = True
mc1.kernel()
self.assertAlmostEqual(numpy.dot(mc1.e_states, [.5,.5]), -108.80445340617777, 8)
mo_occ = lib.chkfile.load(mc1.chkfile, 'mcscf/mo_occ')[5:9]
self.assertAlmostEqual(lib.finger(mo_occ), 1.8748844779923917, 4)
dm1 = mc1.analyze()
self.assertAlmostEqual(lib.finger(dm1[0]), 2.6993157521103779, 4)
self.assertAlmostEqual(lib.finger(dm1[1]), 2.6993157521103779, 4)
def test_natorb(self):
mc1 = mcscf.CASSCF(msym, 4, 4)
mo = mc1.sort_mo_by_irrep({'A1u':2, 'A1g':2})
mc1.natorb = True
mc1.conv_tol = 1e-10
mc1.kernel(mo)
mo_occ = lib.chkfile.load(mc1.chkfile, 'mcscf/mo_occ')[5:9]
self.assertAlmostEqual(mc1.e_tot, -105.83025103050596, 9)
self.assertAlmostEqual(lib.finger(mo_occ), 2.4188178285392317, 4)
mc1.mc2step(mo)
mo_occ = lib.chkfile.load(mc1.chkfile, 'mcscf/mo_occ')[5:9]
self.assertAlmostEqual(mc1.e_tot, -105.83025103050596, 9)
self.assertAlmostEqual(lib.finger(mo_occ), 2.418822007439851, 4)
def test_dep4(self):
mc1 = mcscf.CASSCF(msym, 4, 4)
mo = mc1.sort_mo_by_irrep({'A1u':2, 'A1g':2})
mc1.with_dep4 = True
mc1.max_cycle = 1
mc1.max_cycle_micro = 6
mc1.kernel(mo)
self.assertAlmostEqual(mc1.e_tot, -105.8292690292608, 8)
def test_dep4_df(self):
mc1 = mcscf.CASSCF(msym, 4, 4).density_fit()
mo = mc1.sort_mo_by_irrep({'A1u':2, 'A1g':2})
mc1.with_dep4 = True
mc1.max_cycle = 1
mc1.max_cycle_micro = 6
mc1.kernel(mo)
self.assertAlmostEqual(mc1.e_tot, -105.82923271851176, 8)
    # FIXME: How to test ci_response_space? The test below seems numerically unstable
#def test_ci_response_space(self):
# mc1 = mcscf.CASSCF(m, 4, 4)
# mc1.ci_response_space = 9
# mc1.max_cycle = 1
# mc1.max_cycle_micro = 2
# mc1.kernel()
# self.assertAlmostEqual(mc1.e_tot, -108.85920100433893, 8)
# mc1 = mcscf.CASSCF(m, 4, 4)
# mc1.ci_response_space = 1
# mc1.max_cycle = 1
# mc1.max_cycle_micro = 2
# mc1.kernel()
# self.assertAlmostEqual(mc1.e_tot, -108.85920400781617, 8)
def test_chk(self):
mc2 = mcscf.CASSCF(m, 4, 4)
mc2.update(mc0.chkfile)
mc2.max_cycle = 0
mc2.kernel()
self.assertAlmostEqual(mc0.e_tot, mc2.e_tot, 8)
def test_grad(self):
self.assertAlmostEqual(abs(mc0.get_grad()).max(), 0, 4)
def test_external_fcisolver(self):
fcisolver1 = fci.direct_spin1.FCISolver(mol)
class FCI_as_DMRG(fci.direct_spin1.FCISolver):
def __getattribute__(self, attr):
"""Prevent 'private' attribute access"""
if attr in ('make_rdm1s', 'spin_square', 'contract_2e',
'absorb_h1e'):
raise AttributeError
else:
return object.__getattribute__(self, attr)
def kernel(self, *args, **kwargs):
return fcisolver1.kernel(*args, **kwargs)
mc1 = mcscf.CASSCF(m, 4, 4)
mc1.fcisolver = FCI_as_DMRG(mol)
mc1.natorb = True
mc1.kernel()
self.assertAlmostEqual(mc1.e_tot, -108.85974001740854, 8)
dm1 = mc1.analyze(with_meta_lowdin=False)
self.assertAlmostEqual(lib.finger(dm1[0]), 5.33303, 4)
def test_casci_in_casscf(self):
mc1 = mcscf.CASSCF(m, 4, 4)
e_tot, e_ci, fcivec = mc1.casci(mc1.mo_coeff)
self.assertAlmostEqual(e_tot, -108.83741684447352, 9)
def test_scanner(self):
mc_scan = mcscf.CASSCF(scf.RHF(mol), 4, 4).as_scanner().as_scanner()
mc_scan(mol)
self.assertAlmostEqual(mc_scan.e_tot, -108.85974001740854, 8)
def test_trust_region(self):
mc1 = mcscf.CASSCF(msym, 4, 4)
mc1.max_stepsize = 0.1
mo = mc1.sort_mo_by_irrep({'A1u':3, 'A1g':1})
mc1.ah_grad_trust_region = 0.3
mc1.conv_tol = 1e-7
tot_jk = []
def count_jk(envs):
tot_jk.append(envs.get('njk', 0))
mc1.callback = count_jk
mc1.kernel(mo)
self.assertAlmostEqual(mc1.e_tot, -105.82941031838349, 8)
self.assertEqual(tot_jk, [3,6,6,4,4,3,6,6,3,6,6,3,4,4,3,3,3,3,4,4])
def test_with_ci_init_guess(self):
mc1 = mcscf.CASSCF(msym, 4, 4)
ci0 = numpy.zeros((6,6))
ci0[0,1] = 1
mc1.kernel(ci0=ci0)
mc2 = mcscf.CASSCF(msym, 4, 4)
mc2.wfnsym = 'A1u'
mc2.kernel()
self.assertAlmostEqual(mc1.e_tot, mc2.e_tot, 8)
def test_dump_chk(self):
mcdic = lib.chkfile.load(mc0.chkfile, 'mcscf')
mcscf.chkfile.dump_mcscf(mc0, **mcdic)
def test_state_average1(self):
mc = mcscf.CASSCF(m, 4, 4)
mc.state_average_([0.5, 0.25, 0.25])
mc.fcisolver.spin = 2
mc.run()
self.assertAlmostEqual(mc.e_states[0], -108.7513784239438, 6)
self.assertAlmostEqual(mc.e_states[1], -108.6919327057737, 6)
self.assertAlmostEqual(mc.e_states[2], -108.6919327057737, 6)
mc.analyze()
mo_coeff, civec, mo_occ = mc.cas_natorb(sort=True)
mc = mcscf.CASCI(m, 4, 4)
mc.state_average_([0.5, 0.25, 0.25])
mc.fcisolver.spin = 2
mc.kernel(mo_coeff=mo_coeff)
self.assertAlmostEqual(mc.e_states[0], -108.7513784239438, 6)
self.assertAlmostEqual(mc.e_states[1], -108.6919327057737, 6)
self.assertAlmostEqual(mc.e_states[2], -108.6919327057737, 6)
self.assertAlmostEqual(abs((civec[0]*mc.ci[0]).sum()), 1, 7)
# Second and third root are degenerated
#self.assertAlmostEqual(abs((civec[1]*mc.ci[1]).sum()), 1, 7)
def test_state_average_mix(self):
mc = mcscf.CASSCF(m, 4, 4)
cis1 = copy.copy(mc.fcisolver)
cis1.spin = 2
mc = mcscf.addons.state_average_mix(mc, [cis1, mc.fcisolver], [.5, .5])
mc.run()
self.assertAlmostEqual(mc.e_states[0], -108.7506795311190, 5)
self.assertAlmostEqual(mc.e_states[1], -108.8582272809495, 5)
mc.analyze()
mo_coeff, civec, mo_occ = mc.cas_natorb(sort=True)
mc.kernel(mo_coeff=mo_coeff)
self.assertAlmostEqual(mc.e_states[0], -108.7506795311190, 5)
self.assertAlmostEqual(mc.e_states[1], -108.8582272809495, 5)
self.assertAlmostEqual(abs((civec[0]*mc.ci[0]).sum()), 1, 7)
self.assertAlmostEqual(abs((civec[1]*mc.ci[1]).sum()), 1, 7)
if __name__ == "__main__":
print("Full Tests for mc1step")
unittest.main()
|
sunqm/pyscf
|
pyscf/mcscf/test/test_mc1step.py
|
Python
|
apache-2.0
| 11,340
|
[
"PySCF"
] |
b974fe42691916db17fe5cb0a969b888a02853f45d6cf503119fafb729ce4374
|
"""Guess the MIME type of a file.
This module defines two useful functions:
guess_type(url, strict=True) -- guess the MIME type and encoding of a URL.
guess_extension(type, strict=True) -- guess the extension for a given MIME type.
It also contains the following, for tuning the behavior:
Data:
knownfiles -- list of files to parse
inited -- flag set when init() has been called
suffix_map -- dictionary mapping suffixes to suffixes
encodings_map -- dictionary mapping suffixes to encodings
types_map -- dictionary mapping suffixes to types
Functions:
init([files]) -- parse a list of files, default knownfiles (on Windows, the
default values are taken from the registry)
read_mime_types(file) -- parse one file, return a dictionary or None
"""
import os
import sys
import posixpath
import urllib.parse
try:
import winreg as _winreg
except ImportError:
_winreg = None
__all__ = [
"knownfiles", "inited", "MimeTypes",
"guess_type", "guess_all_extensions", "guess_extension",
"add_type", "init", "read_mime_types",
"suffix_map", "encodings_map", "types_map", "common_types"
]
knownfiles = [
"/etc/mime.types",
"/etc/httpd/mime.types", # Mac OS X
"/etc/httpd/conf/mime.types", # Apache
"/etc/apache/mime.types", # Apache 1
"/etc/apache2/mime.types", # Apache 2
"/usr/local/etc/httpd/conf/mime.types",
"/usr/local/lib/netscape/mime.types",
"/usr/local/etc/httpd/conf/mime.types", # Apache 1.2
"/usr/local/etc/mime.types", # Apache 1.3
]
inited = False
_db = None
class MimeTypes:
"""MIME-types datastore.
This datastore can handle information from mime.types-style files
and supports basic determination of MIME type from a filename or
URL, and can guess a reasonable extension given a MIME type.
"""
def __init__(self, filenames=(), strict=True):
if not inited:
init()
self.encodings_map = _encodings_map_default.copy()
self.suffix_map = _suffix_map_default.copy()
self.types_map = ({}, {}) # dict for (non-strict, strict)
self.types_map_inv = ({}, {})
for (ext, type) in _types_map_default.items():
self.add_type(type, ext, True)
for (ext, type) in _common_types_default.items():
self.add_type(type, ext, False)
for name in filenames:
self.read(name, strict)
def add_type(self, type, ext, strict=True):
"""Add a mapping between a type and an extension.
When the extension is already known, the new
type will replace the old one. When the type
is already known the extension will be added
to the list of known extensions.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
self.types_map[strict][ext] = type
exts = self.types_map_inv[strict].setdefault(type, [])
if ext not in exts:
exts.append(ext)
def guess_type(self, url, strict=True):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if
the type can't be guessed (no or unknown suffix) or a string
of the form type/subtype, usable for a MIME Content-type
header; and encoding is None for no encoding or the name of
the program used to encode (e.g. compress or gzip). The
mappings are table driven. Encoding suffixes are case
sensitive; type suffixes are first tried case sensitive, then
case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all
mapped to '.tar.gz'. (This is table-driven too, using the
dictionary suffix_map.)
Optional `strict' argument when False adds a bunch of commonly found,
but non-standard types.
"""
url = os.fspath(url)
scheme, url = urllib.parse.splittype(url)
if scheme == 'data':
# syntax of data URLs:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
# type/subtype defaults to "text/plain"
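            # e.g. guess_type("data:,Hello") -> ('text/plain', None) and
            #      guess_type("data:image/png;base64,AAAA") -> ('image/png', None)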
comma = url.find(',')
if comma < 0:
# bad data URL
return None, None
semi = url.find(';', 0, comma)
if semi >= 0:
type = url[:semi]
else:
type = url[:comma]
if '=' in type or '/' not in type:
type = 'text/plain'
return type, None # never compressed, so encoding is None
base, ext = posixpath.splitext(url)
while ext in self.suffix_map:
base, ext = posixpath.splitext(base + self.suffix_map[ext])
if ext in self.encodings_map:
encoding = self.encodings_map[ext]
base, ext = posixpath.splitext(base)
else:
encoding = None
types_map = self.types_map[True]
if ext in types_map:
return types_map[ext], encoding
elif ext.lower() in types_map:
return types_map[ext.lower()], encoding
elif strict:
return None, encoding
types_map = self.types_map[False]
if ext in types_map:
return types_map[ext], encoding
elif ext.lower() in types_map:
return types_map[ext.lower()], encoding
else:
return None, encoding
def guess_all_extensions(self, type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data stream,
but would be mapped to the MIME type `type' by guess_type().
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
type = type.lower()
extensions = self.types_map_inv[True].get(type, [])
if not strict:
for ext in self.types_map_inv[False].get(type, []):
if ext not in extensions:
extensions.append(ext)
return extensions
def guess_extension(self, type, strict=True):
"""Guess the extension for a file based on its MIME type.
Return value is a string giving a filename extension,
including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data
stream, but would be mapped to the MIME type `type' by
guess_type(). If no extension can be guessed for `type', None
is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
extensions = self.guess_all_extensions(type, strict)
if not extensions:
return None
return extensions[0]
def read(self, filename, strict=True):
"""
Read a single mime.types-format file, specified by pathname.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
with open(filename, encoding='utf-8') as fp:
self.readfp(fp, strict)
def readfp(self, fp, strict=True):
"""
Read a single mime.types-format file.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
while 1:
line = fp.readline()
if not line:
break
words = line.split()
for i in range(len(words)):
if words[i][0] == '#':
del words[i:]
break
if not words:
continue
type, suffixes = words[0], words[1:]
for suff in suffixes:
self.add_type(type, '.' + suff, strict)
def read_windows_registry(self, strict=True):
"""
Load the MIME types database from Windows registry.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
# Windows only
if not _winreg:
return
def enum_types(mimedb):
i = 0
while True:
try:
ctype = _winreg.EnumKey(mimedb, i)
except OSError:
break
else:
if '\0' not in ctype:
yield ctype
i += 1
with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, '') as hkcr:
for subkeyname in enum_types(hkcr):
try:
with _winreg.OpenKey(hkcr, subkeyname) as subkey:
# Only check file extensions
if not subkeyname.startswith("."):
continue
# raises OSError if no 'Content Type' value
mimetype, datatype = _winreg.QueryValueEx(
subkey, 'Content Type')
if datatype != _winreg.REG_SZ:
continue
self.add_type(mimetype, subkeyname, strict)
except OSError:
continue
def guess_type(url, strict=True):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if the
type can't be guessed (no or unknown suffix) or a string of the
form type/subtype, usable for a MIME Content-type header; and
encoding is None for no encoding or the name of the program used
to encode (e.g. compress or gzip). The mappings are table
driven. Encoding suffixes are case sensitive; type suffixes are
first tried case sensitive, then case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped
to ".tar.gz". (This is table-driven too, using the dictionary
suffix_map).
Optional `strict' argument when false adds a bunch of commonly found, but
non-standard types.
"""
if _db is None:
init()
return _db.guess_type(url, strict)
def guess_all_extensions(type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data
stream, but would be mapped to the MIME type `type' by
guess_type(). If no extension can be guessed for `type', None
is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
if _db is None:
init()
return _db.guess_all_extensions(type, strict)
def guess_extension(type, strict=True):
"""Guess the extension for a file based on its MIME type.
Return value is a string giving a filename extension, including the
leading dot ('.'). The extension is not guaranteed to have been
associated with any particular data stream, but would be mapped to the
MIME type `type' by guess_type(). If no extension can be guessed for
`type', None is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
if _db is None:
init()
return _db.guess_extension(type, strict)
def add_type(type, ext, strict=True):
"""Add a mapping between a type and an extension.
When the extension is already known, the new
type will replace the old one. When the type
is already known the extension will be added
to the list of known extensions.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
if _db is None:
init()
return _db.add_type(type, ext, strict)
def init(files=None):
global suffix_map, types_map, encodings_map, common_types
global inited, _db
inited = True # so that MimeTypes.__init__() doesn't call us again
if files is None or _db is None:
db = MimeTypes()
if _winreg:
db.read_windows_registry()
if files is None:
files = knownfiles
else:
files = knownfiles + list(files)
else:
db = _db
for file in files:
if os.path.isfile(file):
db.read(file)
encodings_map = db.encodings_map
suffix_map = db.suffix_map
types_map = db.types_map[True]
common_types = db.types_map[False]
# Make the DB a global variable now that it is fully initialized
_db = db
def read_mime_types(file):
try:
f = open(file)
except OSError:
return None
with f:
db = MimeTypes()
db.readfp(f, True)
return db.types_map[True]
def _default_mime_types():
global suffix_map, _suffix_map_default
global encodings_map, _encodings_map_default
global types_map, _types_map_default
global common_types, _common_types_default
suffix_map = _suffix_map_default = {
'.svgz': '.svg.gz',
'.tgz': '.tar.gz',
'.taz': '.tar.gz',
'.tz': '.tar.gz',
'.tbz2': '.tar.bz2',
'.txz': '.tar.xz',
}
encodings_map = _encodings_map_default = {
'.gz': 'gzip',
'.Z': 'compress',
'.bz2': 'bzip2',
'.xz': 'xz',
}
# Before adding new types, make sure they are either registered with IANA,
# at http://www.iana.org/assignments/media-types
# or extensions, i.e. using the x- prefix
# If you add to these, please keep them sorted by mime type.
# Make sure the entry with the preferred file extension for a particular mime type
# appears before any others of the same mimetype.
types_map = _types_map_default = {
'.js' : 'application/javascript',
'.mjs' : 'application/javascript',
'.json' : 'application/json',
'.doc' : 'application/msword',
'.dot' : 'application/msword',
'.wiz' : 'application/msword',
'.bin' : 'application/octet-stream',
'.a' : 'application/octet-stream',
'.dll' : 'application/octet-stream',
'.exe' : 'application/octet-stream',
'.o' : 'application/octet-stream',
'.obj' : 'application/octet-stream',
'.so' : 'application/octet-stream',
'.oda' : 'application/oda',
'.pdf' : 'application/pdf',
'.p7c' : 'application/pkcs7-mime',
'.ps' : 'application/postscript',
'.ai' : 'application/postscript',
'.eps' : 'application/postscript',
'.m3u' : 'application/vnd.apple.mpegurl',
'.m3u8' : 'application/vnd.apple.mpegurl',
'.xls' : 'application/vnd.ms-excel',
'.xlb' : 'application/vnd.ms-excel',
'.ppt' : 'application/vnd.ms-powerpoint',
'.pot' : 'application/vnd.ms-powerpoint',
'.ppa' : 'application/vnd.ms-powerpoint',
'.pps' : 'application/vnd.ms-powerpoint',
'.pwz' : 'application/vnd.ms-powerpoint',
'.wasm' : 'application/wasm',
'.bcpio' : 'application/x-bcpio',
'.cpio' : 'application/x-cpio',
'.csh' : 'application/x-csh',
'.dvi' : 'application/x-dvi',
'.gtar' : 'application/x-gtar',
'.hdf' : 'application/x-hdf',
'.latex' : 'application/x-latex',
'.mif' : 'application/x-mif',
'.cdf' : 'application/x-netcdf',
'.nc' : 'application/x-netcdf',
'.p12' : 'application/x-pkcs12',
'.pfx' : 'application/x-pkcs12',
'.ram' : 'application/x-pn-realaudio',
'.pyc' : 'application/x-python-code',
'.pyo' : 'application/x-python-code',
'.sh' : 'application/x-sh',
'.shar' : 'application/x-shar',
'.swf' : 'application/x-shockwave-flash',
'.sv4cpio': 'application/x-sv4cpio',
'.sv4crc' : 'application/x-sv4crc',
'.tar' : 'application/x-tar',
'.tcl' : 'application/x-tcl',
'.tex' : 'application/x-tex',
'.texi' : 'application/x-texinfo',
'.texinfo': 'application/x-texinfo',
'.roff' : 'application/x-troff',
'.t' : 'application/x-troff',
'.tr' : 'application/x-troff',
'.man' : 'application/x-troff-man',
'.me' : 'application/x-troff-me',
'.ms' : 'application/x-troff-ms',
'.ustar' : 'application/x-ustar',
'.src' : 'application/x-wais-source',
'.xsl' : 'application/xml',
'.rdf' : 'application/xml',
'.wsdl' : 'application/xml',
'.xpdl' : 'application/xml',
'.zip' : 'application/zip',
'.au' : 'audio/basic',
'.snd' : 'audio/basic',
'.mp3' : 'audio/mpeg',
'.mp2' : 'audio/mpeg',
'.aif' : 'audio/x-aiff',
'.aifc' : 'audio/x-aiff',
'.aiff' : 'audio/x-aiff',
'.ra' : 'audio/x-pn-realaudio',
'.wav' : 'audio/x-wav',
'.bmp' : 'image/bmp',
'.gif' : 'image/gif',
'.ief' : 'image/ief',
'.jpg' : 'image/jpeg',
'.jpe' : 'image/jpeg',
'.jpeg' : 'image/jpeg',
'.png' : 'image/png',
'.svg' : 'image/svg+xml',
'.tiff' : 'image/tiff',
'.tif' : 'image/tiff',
'.ico' : 'image/vnd.microsoft.icon',
'.ras' : 'image/x-cmu-raster',
'.bmp' : 'image/x-ms-bmp',
'.pnm' : 'image/x-portable-anymap',
'.pbm' : 'image/x-portable-bitmap',
'.pgm' : 'image/x-portable-graymap',
'.ppm' : 'image/x-portable-pixmap',
'.rgb' : 'image/x-rgb',
'.xbm' : 'image/x-xbitmap',
'.xpm' : 'image/x-xpixmap',
'.xwd' : 'image/x-xwindowdump',
'.eml' : 'message/rfc822',
'.mht' : 'message/rfc822',
'.mhtml' : 'message/rfc822',
'.nws' : 'message/rfc822',
'.css' : 'text/css',
'.csv' : 'text/csv',
'.html' : 'text/html',
'.htm' : 'text/html',
'.txt' : 'text/plain',
'.bat' : 'text/plain',
'.c' : 'text/plain',
'.h' : 'text/plain',
'.ksh' : 'text/plain',
'.pl' : 'text/plain',
'.rtx' : 'text/richtext',
'.tsv' : 'text/tab-separated-values',
'.py' : 'text/x-python',
'.etx' : 'text/x-setext',
'.sgm' : 'text/x-sgml',
'.sgml' : 'text/x-sgml',
'.vcf' : 'text/x-vcard',
'.xml' : 'text/xml',
'.mp4' : 'video/mp4',
'.mpeg' : 'video/mpeg',
'.m1v' : 'video/mpeg',
'.mpa' : 'video/mpeg',
'.mpe' : 'video/mpeg',
'.mpg' : 'video/mpeg',
'.mov' : 'video/quicktime',
'.qt' : 'video/quicktime',
'.webm' : 'video/webm',
'.avi' : 'video/x-msvideo',
'.movie' : 'video/x-sgi-movie',
}
# These are non-standard types, commonly found in the wild. They will
# only match if strict=0 flag is given to the API methods.
# Please sort these too
common_types = _common_types_default = {
'.rtf' : 'application/rtf',
'.midi': 'audio/midi',
'.mid' : 'audio/midi',
'.jpg' : 'image/jpg',
'.pict': 'image/pict',
'.pct' : 'image/pict',
'.pic' : 'image/pict',
'.xul' : 'text/xul',
}
_default_mime_types()
if __name__ == '__main__':
import getopt
USAGE = """\
Usage: mimetypes.py [options] type
Options:
--help / -h -- print this message and exit
--lenient / -l -- additionally search some common, but non-standard
types.
--extension / -e -- guess extension instead of type
More than one type argument may be given.
"""
def usage(code, msg=''):
print(USAGE)
if msg: print(msg)
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hle',
['help', 'lenient', 'extension'])
except getopt.error as msg:
usage(1, msg)
strict = 1
extension = 0
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-l', '--lenient'):
strict = 0
elif opt in ('-e', '--extension'):
extension = 1
for gtype in args:
if extension:
guess = guess_extension(gtype, strict)
if not guess: print("I don't know anything about type", gtype)
else: print(guess)
else:
guess, encoding = guess_type(gtype, strict)
if not guess: print("I don't know anything about type", gtype)
else: print('type:', guess, 'encoding:', encoding)
|
prefetchnta/questlab
|
bin/x64bin/python/37/Lib/mimetypes.py
|
Python
|
lgpl-2.1
| 22,104
|
[
"NetCDF"
] |
18e587ef03dde8214610f57bb07e5a0f2ab3a794eee927a9b6b32265cfcb9007
|
# proxy module
from __future__ import absolute_import
from mayavi.sources.ui.parametric_surface import *
|
enthought/etsproxy
|
enthought/mayavi/sources/ui/parametric_surface.py
|
Python
|
bsd-3-clause
| 105
|
[
"Mayavi"
] |
531105091eeed44d3cadd2d16eec9ee8d7263608114a58f3e7f9e9d4a2642510
|
"""
bct active v0.01
- implements an alternative market classification and scoring algo
Bitcoin Trade Simulator
Copyright 2011 Brian Monkaba
This file is part of ga-bitbot.
ga-bitbot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ga-bitbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ga-bitbot. If not, see <http://www.gnu.org/licenses/>.
"""
import time
from operator import itemgetter
from math import exp
import sys
import paths
from logs import *
from cache import *
class trade_engine:
def __init__(self):
self.cache = cache()
self.cache_input = True
self.cache_results = True
self.cache_chart = True
self.logs = logs()
#configurable variables
self.input_file_name = ""
self.score_only = False #set to true to only calculate what is required for scoring a strategy
#to speed up performance.
self.shares = 0.1 #order size
self.wll = 180 #window length long
self.wls = 2 #window length short
self.buy_wait = 0 #min sample periods between buy orders
self.buy_wait_after_stop_loss = 6 #min sample periods between buy orders
#after a stop loss order
self.markup = 0.01 #order mark up
self.stop_loss = 0.282 #stop loss
self.enable_flash_crash_protection = True #convert a stop loss order into a short term hold position
self.flash_crash_protection_delay = 180 #max_hold in minutes
self.stop_age = 10000 #stop age - dump after n periods
        self.atr_depth = 60 * 1 #period depth of the average true range, used to split input data into quartiles
self.macd_buy_trip = -0.66 #macd buy indicator
self.rsi_enable = 0 #enable/disable the relative strength indicator
self.rsi_length = 1 #RSI length
self.rsi_period_length = 10 #RSI period length
self.rsi_gate = 50 #RSI gate (RSI must be below gate to enable buy orders)
self.min_i_pos = 0 #min periods of increasing price
#before buy order placed
self.min_i_neg = 0 #min periods of declining price
#before sell order placed
self.stbf = 2.02 #short trade biasing factor
#-- increase to favor day trading
#-- decrease to 2 to eliminate bias
self.nlsf = 5.0 #non-linear scoring factor - favor the latest trades
        #max factor = exp(self.nlsf) @ the last sample period
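        #e.g. with nlsf = 5.0, score() weights the newest trade by roughly
        #exp(5) ~ 148x while the oldest trade keeps a weight near exp(0) = 1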
        self.commision = 0.006 #mt.gox commission
self.quartile = 1 #define which market detection quartile to trade on (1-4)
self.input_data = []
self.input_data_length = 0
self.market_class = []
self.current_quartile = 0
self.classified_market_data = False
self.max_length = 1000000
self.reset()
return
def reset(self):
#metrics and state variables
self.history = [] #moving window of the inputs
self.period = 0 #current input period
self.time = 0 #current period timestamp
#self.input_log = [] #record of the inputs
#self.wl_log = [] #record of the wl
#self.ws_log = [] #record of the ws
#self.macd_pct_log = []
#self.rsi_log = []
#self.buy_log = []
#self.sell_log = []
#self.stop_log = []
#self.net_worth_log = []
#self.trigger_log = []
self.logs.reset()
self.balance = 1000 #account balance
self.opening_balance = self.balance #record the starting balance
        self.score_balance = 0 #cumulative score
self.text_summary = ""
self.buy_delay = 0 #delay buy counter
self.buy_delay_inital = self.buy_delay #delay buy counter
self.macd_pct = 0
self.macd_abs = 0
self.avg_wl = 0
self.avg_ws = 0
self.ema_short = 0
self.ema_long = 0
self.rsi = 50 #RSI indicator value
self.rsi_gain = [] #RSI avg gain buffer
self.rsi_loss = [] #RSI avg loss buffer
self.rsi_period_buffer = [] #RSI period buffer
self.rsi_period_last = 50 #RSI last buffered period
self.i_pos = 0 #periods of increasing price
self.i_neg = 0 #periods of decreasing price
self.positions_open = [] #open order subset of all trade positions
self.positions = [] #all trade positions
self.metric_macd_pct_max = -10000 #macd metrics
self.metric_macd_pct_min = 10000
self.wins = 0
self.loss = 0
self.order_history = "NOT GENERATED"
self.current_quartile = 0
return
def load_input_data(self):
print "bct_alt: loading input data"
self.input_data = None
if self.cache_input == True:
cache_label = self.input_file_name +'::'+str(self.max_length)
self.input_data = self.cache.get(cache_label)
if self.input_data == None:
f = open(self.input_file_name,'r')
d = f.readlines()
f.close()
if len(d) > self.max_length:
d = d[self.max_length * -1:]
self.input_data = []
for row in d[1:]:
r = row.split(',')[1] #last price
t = row.split(',')[0] #time
self.input_data.append([int(float(t)),float(r)])
print "bct_alt: input data loaded from file."
if self.cache_input == True:
self.cache.set(cache_label,self.input_data)
self.cache.expire(cache_label,60*15)
else:
print "bct_alt:: cached data found.",cache_label
self.input_data_length = len(self.input_data)
return
def initialize(self):
print "bct_alt: initializing"
self.load_input_data()
cm = None
if self.cache_input == True:
cache_label = self.input_file_name + '::bct_slope_classify_market::'+str(self.max_length)+'::atr_depth::'+str(self.atr_depth)
cm = self.cache.get(cache_label)
if cm == None:
print "bct_alt: classifying market data..."
self.classify_market(self.input_data)
if self.cache_input == True:
self.cache.set(cache_label,self.market_class)
self.cache.expire(cache_label,60*15)
else:
print "bct_alt: cached market classification data found.",cache_label
self.market_class = cm
self.classified_market_data = True
return self.current_quartile
def run(self):
for i in self.input_data:
self.input(i[0],i[1])
if self.cache_results == True:
pass
return
def test_quartile(self,quartile):
#valid inputs are 1-4
self.quartile = quartile / 4.0
def classify_market(self,input_list):
#print "start market classify"
        #market detection preprocessor splits the input data into
        #quartiles based on the normalized slope of price against the long-window EMA
self.market_class = []
#calculate the ema weighting multiplier
ema_long_mult = (2.0 / (self.wll + 1) )
ema = 0
ema_history = []
slope = []
t = 0
p = 0
for i in xrange(len(input_list)):
t,p = input_list[i]
t = int(t * 1000)
#bootstrap the ema calc using a simple moving avg if needed
if ema == 0:
for j in xrange(self.wll):
ema += input_list[j][1] / self.wll
else:
#calculate the long and short ema
ema = (p - ema) * ema_long_mult + ema
ema_history.append(ema)
if i < self.wll:
slope.append([t,0.0])
else:
slope.append([t,(p - ema_history[i - self.wll])/p])
self.market_class = slope
#pad the end of the data to support future order testing
for i in xrange(10):
            self.market_class.append([t,self.market_class[-1][1]]) #pad with the last slope value, not the whole record
quartiles = []
l = [r[1] for r in self.market_class]
l.sort()
quartiles.append(l[int(len(l) * 0.25)])
quartiles.append(l[int(len(l) * 0.50)])
quartiles.append(l[int(len(l) * 0.75)])
#and apply them to the market class log
for i in xrange(len(self.market_class)):
p = self.market_class[i][1]
self.market_class[i][1] = 0.25
if p > quartiles[0]:
self.market_class[i][1] = 0.50
if p > quartiles[1]:
self.market_class[i][1] = 0.75
if p > quartiles[2]:
self.market_class[i][1] = 1.0
if i < self.atr_depth + 1:
self.market_class[i][1] = 0.0 #ignore early (uncalculated) data
self.classified_market_data = True
self.current_quartile = int(self.market_class[len(self.market_class)-1][1] * 4) #return the current quartile (1-4)
return self.current_quartile
def metrics_report(self):
m = ""
m += "\nShares: " + str(self.shares)
m += "\nMarkup: " + str(self.markup * 100) + "%"
m += "\nStop Loss: " + str(self.stop_loss * 100) + "%"
m += "\nStop Age: " + str(self.stop_age)
m += "\nBuy Delay: " + str(self.buy_wait)
m += "\nBuy Delay After Stop Loss: " + str(self.buy_wait_after_stop_loss)
m += "\nMACD Trigger: " + str(self.macd_buy_trip) + "%"
m += "\nEMA Window Long: " + str(self.wll)
m += "\nEMA Window Short: " + str(self.wls)
m += "\nRSI Enable: " + str(self.rsi_enable)
m += "\nRSI Length: " + str(self.rsi_length)
m += "\nRSI Gate: " + str(self.rsi_gate)
m += "\niPos: " + str(self.i_pos)
m += "\niNeg: " + str(self.i_neg)
m += "\nShort Trade Bias: " + str(self.stbf)
m += "\nCommision: " + str(self.commision * 100) + "%"
m += "\nScore: " + str(self.score())
m += "\nTotal Periods : " + str(self.period)
m += "\nInitial Buy Delay : " + str(self.buy_delay_inital)
m += "\nOpening Balance: $" + str(self.opening_balance)
m += "\nClosing Balance: $" + str(self.balance)
m += "\nTransaction Count: " + str(len(self.positions))
m += "\nWin: " + str(self.wins)
m += "\nLoss: " + str(self.loss)
try:
m += "\nWin Pct: " + str(100 * (self.wins / float(self.wins + self.loss))) + "%"
except:
pass
m += "\nMACD Max Pct: " + str(self.metric_macd_pct_max)+ "%"
m += "\nMACD Min Pct: " + str(self.metric_macd_pct_min)+ "%"
return m
def dump_open_positions(self):
#dump all active trades to get a current balance
self.positions_open = [] #clear out the subset buffer
for position in self.positions:
if position['status'] == "active":
position['status'] = "dumped"
position['actual'] = self.history[1] #HACK! go back in time one period to make sure we're using a real price
#and not a buy order target from the reporting script.
if position['buy_period'] != self.period:
position['sell_period'] = self.period
else:
position['sell_period'] = self.period + 1 #hold for at least one period
self.balance += position['actual'] * (position['shares'] - (position['shares'] * self.commision))
def score(self):
open_positions = len(self.positions_open)
self.dump_open_positions()
if (self.wins + self.loss) > 0:
self.positions = sorted(self.positions, key=itemgetter('buy_period'))
exp_scale = self.nlsf / self.period #float(self.positions[-1]['buy_period'])
final_score_balance = 0
for p in self.positions:
p['age'] = float(p['sell_period'] - p['buy_period'])
p['score'] = (((p['actual'] - p['buy']) / p['buy']) * 100.0 ) * self.shares
#apply non-linear scaling to the trade based on the round trip time (age)
#favors a faster turn around time on positions
p['score'] *= (p['age'] + 1)/(pow(p['age'],self.stbf) + 1)
#apply e^0 to e^nlsf weighting to favor the latest trade results
p['score'] *= exp(exp_scale * p['buy_period'])
final_score_balance += p['score']
            #because stop loss will generally be higher than the target (markup) percentage
            #the loss count needs to be weighted by the pct difference
loss_weighting_factor = self.stop_loss / self.markup
final_score_balance *= self.wins / (self.wins + (self.loss * loss_weighting_factor) * 1.0)
#final_score_balance *= self.markup * len(self.positions)
#fine tune the score
final_score_balance -= self.buy_wait / 1000.0
final_score_balance -= self.buy_wait_after_stop_loss / 1000.0
final_score_balance -= (self.stop_loss * 1000)
final_score_balance += (self.wls / 1000.0)
final_score_balance -= (self.stop_age / 1000.0)
final_score_balance += self.shares
final_score_balance += (128.0 - self.rsi_gate) / 1000.0
final_score_balance *= ( 1 + open_positions)
#risk reward weighting
if final_score_balance > 0:
rr = self.markup / (self.stop_loss + 0.00001)
#clamp the risk reward weighting
if rr > 2.0:
rr = 2.0
final_score_balance *= rr
                #severely penalize the score if the win ratio is less than 55%
if self.wins / (self.wins + self.loss * 1.0) < 0.55:
final_score_balance /= 1000.0
#if self.opening_balance > self.balance:
# #losing strategy
# final_score_balance -= 5000 #999999999
else:
#no trades generated
final_score_balance = -987654321.123456789
self.score_balance = final_score_balance
#create the text_summary of the results
self.text_summary = "Balance: " + str(self.balance) +"; Wins: " + str(self.wins)+ "; Loss:" + str(self.loss) + "; Positions: " + str(len(self.positions))
return final_score_balance
def ai(self):
#make sure the two moving averages (window length long and short) don't get inverted
if self.wll < self.wls:
self.wll += self.wls
#decrement the buy wait counter
if self.buy_delay > 0:
self.buy_delay -= 1
current_price = self.history[0]
buy = current_price * -1
initiate_buy_order = False
if self.classified_market_data == False or self.quartile == self.market_class[self.period][1]:
if self.balance > (current_price * self.shares) and self.buy_delay == 0 :
if self.macd_pct < self.macd_buy_trip:
if self.rsi_enable == 0:
initiate_buy_order = True
elif self.rsi < self.rsi_gate:
initiate_buy_order = True
if initiate_buy_order == True:
#set delay until next buy order
self.buy_delay = self.buy_wait
self.balance -= (current_price * self.shares)
actual_shares = self.shares - (self.shares * self.commision)
buy = current_price
target = (buy * self.markup) + buy
stop = buy - (buy * self.stop_loss)
#self.buy_log.append([self.time,buy])
self.logs.append('buy',[self.time,buy])
new_position = {'master_index':len(self.positions),'age':0,'buy_period':self.period,'sell_period':0,'trade_pos': self.balance,'shares':actual_shares,'buy':buy,'cost':self.shares*buy,'target':target,'stop':stop,'status':"active",'actual':0,'score':0}
self.positions.append(new_position.copy())
                #maintain a separate subset of open positions to speed up the search to close the open positions
self.positions_open.append(new_position.copy())
current_net_worth = 0
#check for sold and stop loss orders
sell = current_price * -1
stop = current_price * -1
updated = False
for position in self.positions_open:
#handle sold positions
if position['status'] == "active" and position['target'] <= current_price:
updated = True
position['status'] = "sold"
position['actual'] = position['target']
sell = current_price
position['sell_period'] = self.period
self.wins += 1
self.balance += position['target'] * (position['shares'] - (position['shares'] * self.commision))
buy_period = position['buy_period']
self.positions[position['master_index']] = position.copy()
#handle the stop orders
elif position['status'] == "active" and (position['stop'] >= current_price or position['age'] >= self.stop_age):
if position['stop'] >= current_price:
if self.enable_flash_crash_protection == True and self.market_class[self.period][1] == 1.0:
stop_order_executed = False
#convert the stop loss order into a short term hold position
position['age'] = self.stop_age - self.flash_crash_protection_delay
position['stop'] *= -1.0
else:
#stop loss
stop_order_executed = True
updated = True
position['status'] = "stop"
position['actual'] = current_price
stop = current_price
position['sell_period'] = self.period
self.loss += 1
self.buy_delay += self.buy_wait_after_stop_loss
else:
#stop wait
stop_order_executed = True
updated = True
position['status'] = "stop"
position['actual'] = current_price
stop = current_price
position['sell_period'] = self.period
self.loss += 1
self.buy_delay += self.buy_wait_after_stop_loss
if stop_order_executed == True:
self.balance += position['actual'] * (position['shares'] - (position['shares'] * self.commision))
#update the position in the master list
buy_period = position['buy_period']
self.positions[position['master_index']] = position.copy()
#handle active (open) positions
elif position['status'] == "active":
if not self.score_only:
#position remains open, capture the current value
current_net_worth += current_price * (position['shares'] - (position['shares'] * self.commision))
position['age'] += 1
#remove any closed positions from the open position subset
if updated == True:
self.positions_open = filter(lambda x: x.get('status') == 'active', self.positions_open)
if not self.score_only:
#add the balance to the net worth
current_net_worth += self.balance
if self.classified_market_data == False or self.quartile == self.market_class[self.period][1]:
#self.trigger_log.append([self.time,self.get_target()])
self.logs.append('trigger',[self.time,self.get_target()])
#self.net_worth_log.append([self.time,current_net_worth])
self.logs.append('net_worth',[self.time,current_net_worth])
if sell > 0:
#self.sell_log.append([self.time,sell])
self.logs.append('sell',[self.time,sell])
if stop > 0:
#self.stop_log.append([self.time,stop])
self.logs.append('stop',[self.time,stop])
return
def get_target(self):
#calculates the inverse macd
#wolfram alpha used to transform the macd equation to solve for the trigger price:
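        #sketch of the algebra (assuming the same EMA updates used in macd(),
        #i.e. ema' = (p - ema) * k + ema with k = 2/(w + 1)):
        #  macd_pct(p) = 100 * (ema_short'(p) - ema_long'(p)) / ema_short'(p)
        #set macd_pct(p) = macd_buy_trip and solve for p to get the trigger
        #price; a penny is then subtracted so a fill at that price strictly
        #meets the "macd_pct < macd_buy_trip" buy test in ai()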
price = 0.0
try:
price = -1.0 * (100.0 *(self.wls+1)*(self.wll-1)*self.ema_long + (self.wll+1)*self.ema_short*(self.wls * (self.macd_buy_trip - 100) + self.macd_buy_trip + 100)) / (200 * (self.wls - self.wll))
price -= 0.01 #subtract a penny to satisfy the trigger criteria
except:
price = 0.0
if price < 0.0:
price = 0.0
#clamp the max value
if price > self.history[0]:
price = self.history[0]
#clamp the min value (70% of ema_long)
if price < self.ema_long * 0.7:
price = self.ema_long * 0.7
return price
def rs(self):
#DEBUG
#self.rsi_length = 20
#buffer input until period length has been reached
#this allows the RSI period to differ from the system period
# TODO: Make the RSI buffered period volume weighted
self.rsi_period_buffer.append(self.history[0])
if len(self.rsi_period_buffer) == self.rsi_period_length:
period = sum(self.rsi_period_buffer)/self.rsi_period_length if (self.rsi_period_length) > 0 else 0
self.rsi_period_buffer = []
else:
#buffer not full - no update to RSI
#log the indicator
if not self.score_only:
#self.rsi_log.append([self.time,self.rsi])
self.logs.append('rsi',[self.time,self.rsi])
return
#relative strength indicator
if self.rsi_enable > 0:
#determine if period is a gain or loss and bin the absolute difference
delta = period - self.rsi_period_last
if delta > 0:
#gain
self.rsi_gain.insert(0,delta)
self.rsi_loss.insert(0,0)
elif delta < 0:
#loss
self.rsi_gain.insert(0,0)
self.rsi_loss.insert(0,abs(delta))
else:
#no movement
self.rsi_gain.insert(0,0)
self.rsi_loss.insert(0,0)
else:
self.rsi = 50
if len(self.rsi_gain) > self.rsi_length:
self.rsi_gain = self.rsi_gain[:self.rsi_length]
if len(self.rsi_loss) > self.rsi_length:
self.rsi_loss = self.rsi_loss[:self.rsi_length]
#calculate average gain and loss
avg_gain = sum(self.rsi_gain)/len(self.rsi_gain) if (len(self.rsi_gain)) > 0 else 0
avg_loss = sum(self.rsi_loss)/len(self.rsi_loss) if (len(self.rsi_loss)) > 0 else 0
rs = avg_gain / (avg_loss + 0.00001)
self.rsi = (100.0 - ( 100.0 / (1 + rs)))
#update the last period
self.rsi_period_last = period
#log the indicator
if not self.score_only:
#self.rsi_log.append([self.time,self.rsi])
self.logs.append('rsi',[self.time,self.rsi])
return
def macd(self):
#wait until there is enough data to fill the moving windows
if len(self.history) >= self.wll:
s = 0
l = 0
#calculate the ema weighting multipliers
ema_short_mult = (2.0 / (self.wls + 1) )
ema_long_mult = (2.0 / (self.wll + 1) )
#bootstrap the ema calc using a simple moving avg if needed
if self.ema_long == 0:
for i in xrange(self.wll):
if i < self.wls:
s += self.history[i]
l += self.history[i]
self.avg_ws = s / self.wls
self.avg_wl = l / self.wll
self.ema_long = self.avg_wl
self.ema_short = self.avg_ws
else:
#calculate the long and short ema
self.ema_long = (self.history[0] - self.ema_long) * ema_long_mult + self.ema_long
self.ema_short = (self.history[0] - self.ema_short) * ema_short_mult + self.ema_short
#calculate the absolute and pct differences between the
#long and short emas
self.macd_abs = self.ema_short - self.ema_long
self.macd_pct = (self.macd_abs / self.ema_short) * 100
if not self.score_only:
#track the max & min macd pcts (metric)
if self.macd_pct > self.metric_macd_pct_max:
self.metric_macd_pct_max = self.macd_pct
if self.macd_pct < self.metric_macd_pct_min:
self.metric_macd_pct_min = self.macd_pct
else:
self.ema_short = self.history[0]
self.ema_long = self.history[0]
self.macd_pct = 0
#log the indicators
if not self.score_only:
#self.macd_pct_log.append([self.time,self.macd_pct])
#self.wl_log.append([self.time,self.ema_long])
#self.ws_log.append([self.time,self.ema_short])
self.logs.append('macd',[self.time,self.macd_pct])
self.logs.append('wll',[self.time,self.ema_long])
self.logs.append('wls',[self.time,self.ema_short])
def display(self):
#used for debug
print ",".join(map(str,[self.history[0],self.macd_pct,self.buy_wait]))
def input(self,time_stamp,record):
self.period += 1 #increment the period counter
if not self.score_only:
#self.time = int(time.mktime(time.strptime(time_stamp))) * 1000
self.time = int(time_stamp * 1000)
self.logs.append('price',[self.time,record])
###Date,Sell,Buy,Last,Vol,High,Low,###
self.history.insert(0,record)
if len(self.history) > (self.wll + self.wls):
self.history.pop() #maintain a moving window of
#the last wll records
self.macd() #calc macd
if self.rsi_enable > 0:
self.rs() #calc RSI
self.ai() #call the trade ai
#self.display()
return
def log_orders(self,filename=None):
self.order_history = ""
print "log_orders: sorting data"
self.positions = sorted(self.positions, key=itemgetter('buy_period'),reverse=True)
if len(self.positions) > 0:
keys = self.positions[0].keys()
#write the header
self.order_history = "<table class='imgtbl'>\n"
self.order_history +="<tr>"
for key in keys:
self.order_history +="<th>%s</th>"%key
self.order_history +="</tr>\n"
#only htmlize the last positions so the browser doesn't blow up ;)
reported_position_count_limit = 200
reported_position_count = 0
print "bct_alt:log_orders: generating html table for %s positions"%(len(self.positions))
for p in self.positions:
if reported_position_count >= reported_position_count_limit:
break
                #I don't care about the dumped positions, they're not real transactions anyway.
#They're only generated to calculate/report the current account value.
if p['status']!='dumped':
reported_position_count += 1
self.order_history +="<tr>"
for key in keys:
if p.has_key(key):
                        #I don't care about the dumped positions, they're not real transactions anyway.
#They're only generated to calculate/report the current account value.
if p['status']!='dumped':
if p['status']=='stop':
color = 'r'
elif p['status']=='dumped': #Im leaving this here in case I want to turn it back on.
color = 'y'
elif p['status']=='sold':
color = 'g'
else:
color = 'b'
self.order_history +="<td class='%s'>"%color
if type(p[key]) == type(1.0):
self.order_history += "%.2f"% round(p[key],2)
else:
self.order_history += str(p[key])
self.order_history +="</td>"
elif p['status']!='dumped':
self.order_history +="<td>N/A</td>"
if p['status']!='dumped':
self.order_history +="</tr>\n"
self.order_history += "</table>"
return
def log_transactions(self,filename):
#log the transactions to a file
#used with excel / gdocs to chart price and buy/sell indicators
f = open(filename,'w')
input_log = self.logs.get('price')
for i in xrange(len(input_log)):
for position in self.positions:
if position['buy_period'] == i:
#print position['buy_period'],i
input_log[i].append('buy')
input_log[i].append(position['sell_period'] - position['buy_period'])
input_log[i].append(position['status'])
input_log[i].append(i)
if position['sell_period'] == i:
input_log[i].append('sell')
input_log[i].append('0')
input_log[i].append(position['status'])
input_log[i].append(i)
r = ",".join(map(str,input_log[i]))
f.write(r)
f.write('\n')
f.close()
return
def compress_log(self,log,lossless_compression = False):
#removes records with no change in price, before and after record n
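        #e.g. [[t0,5.0],[t1,5.0],[t2,5.0],[t3,5.1]] -> [t1,5.0] is dropped because
        #both neighbours hold the same value; the pass repeats until stable, then
        #(unless lossless) pairs are thinned by keeping whichever point deviates
        #most from a moving average until the log fits in 2000 records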
compressible = True
while compressible:
compressible = False
ret_log = []
for i in xrange(len(log)):
if type(log[i][1]) == float:
log[i][1] = float("%.3f"%log[i][1])
if i >= 1 and i < len(log) - 1:
if log[i-1][1] == log[i][1] and log[i+1][1] == log[i][1]:
compressible = True #no change in value before or after, omit record
else:
ret_log.append(log[i])
else:
ret_log.append(log[i])
log = ret_log
if lossless_compression == True:
return ret_log
while len(log) > 2000:
avg = log[0][1]
avg = (log[0][1] - avg) * 0.2 + avg
ret_log = [log[0]] #capture the first record
for i in xrange(1,len(log),2):
#find which sample that deviates the most from the average
a = abs(log[i][1] - avg)
b = abs(log[i-1][1] - avg)
if a > b:
ret_log.append(log[i])
else:
ret_log.append(log[i-1])
#update the moving average
avg = (log[i-1][1] - avg) * 0.2 + avg
avg = (log[i][1] - avg) * 0.2 + avg
ret_log.append(log[len(log)-1]) #make sure the last record is captured
log = ret_log
return ret_log
def cache_output(self,cache_name,periods=80000):
p = self.logs.get('price')
if len(p) > periods:
self.logs.prune_logs(p[-1*periods][0])
self.logs.compress_logs(exclude_keys=['buy','sell','stop','trigger'],lossless_compression = False, max_lossy_length = 10000)
self.cache.set(cache_name,self.logs.json())
self.cache.expire(cache_name,60*25)
def chart(self,template,filename,periods=-1,basic_chart=False,write_cache_only=False):
self.log_orders()
f = open(template,'r')
tmpl = f.read()
f.close()
if periods < 0:
periods = self.period * -1
else:
periods *= -1
        #insert all quartiles at the beginning of the market class data to ensure correct
#chart scaling. This covers the case where the chart period doesn't see all quartiles
mc = self.market_class[periods:]
t = mc[0][0]
for i in range(0,4):
t += 1
q = (i + 1) / 4.0
mc.insert(0,[t,q])
print "bct_alt:chart: compressing data"
if not basic_chart:
wl = str(self.compress_log(self.logs.get('wll')[periods:])).replace('L','')
ws = str(self.compress_log(self.logs.get('wls')[periods:])).replace('L','')
net_worth = str(self.compress_log(self.logs.get('net_worth')[periods:],lossless_compression = True)).replace('L','')
else:
wl = str([])
ws = str([])
net_worth = str([])
macd_pct = str(self.compress_log(self.logs.get('macd')[periods:])).replace('L','')
input = str(self.compress_log(self.logs.get('price')[periods:])).replace('L','')
volatility_quartile = str(self.compress_log(mc,lossless_compression = True)).replace('L','')
if self.rsi_enable > 0:
rsi = str(self.compress_log(self.logs.get('rsi')[periods:],lossless_compression = True))
##DEBUG
macd_pct = rsi
tmpl = tmpl.replace("MACD PCT","RSI")
buy = str([])
sell = str([])
stop = str([])
trigger_price = str([])
self.logs.addkey('buy') #add keys to the log
self.logs.addkey('sell') # - creates an empty log if it doesn't exist already
self.logs.addkey('stop')
self.logs.addkey('trigger')
if periods == self.period:
buy = str(self.logs.get('buy')[periods:]).replace('L','')
sell = str(self.logs.get('sell')[periods:]).replace('L','')
stop = str(self.logs.get('stop')[periods:]).replace('L','')
trigger_price = str(self.compress_log(self.logs.get('trigger')[periods:],lossless_compression = True)).replace('L','')
else:
print "bct_alt:chart: selecting data"
#get the timestamp for the start index
time_stamp = self.logs.get('price')[periods:periods+1][0][0]
#search the following for the time stamp
for i in xrange(len(self.logs._log['buy'])):
if self.logs._log['buy'][i][0] >= time_stamp:
buy = str(self.logs._log['buy'][i:]).replace('L','')
break
for i in xrange(len(self.logs._log['sell'])):
if self.logs._log['sell'][i][0] >= time_stamp:
sell = str(self.logs._log['sell'][i:]).replace('L','')
break
for i in xrange(len(self.logs._log['stop'])):
if self.logs._log['stop'][i][0] >= time_stamp:
stop = str(self.logs._log['stop'][i:]).replace('L','')
break
for i in xrange(len(self.logs._log['trigger'])):
if self.logs._log['trigger'][i][0] >= time_stamp:
trigger_price = str(self.logs._log['trigger'][i:]).replace('L','')
break
print "bct_alt:chart: filling the template"
tmpl = tmpl.replace("{LAST_UPDATE}",time.ctime())
tmpl = tmpl.replace("{PRICES}",input)
tmpl = tmpl.replace("{WINDOW_LONG}",wl)
tmpl = tmpl.replace("{WINDOW_SHORT}",ws)
tmpl = tmpl.replace("{MACD_PCT}",macd_pct)
tmpl = tmpl.replace("{BUY}",buy)
tmpl = tmpl.replace("{SELL}",sell)
tmpl = tmpl.replace("{STOP}",stop)
tmpl = tmpl.replace("{NET_WORTH}",net_worth)
tmpl = tmpl.replace("{TRIGGER_PRICE}",trigger_price)
tmpl = tmpl.replace("{METRICS_REPORT}",self.metrics_report().replace('\n','<BR>'))
tmpl = tmpl.replace("{ORDERS_REPORT}",self.order_history)
tmpl = tmpl.replace("{VOLATILITY_QUARTILE}",volatility_quartile)
if write_cache_only == False:
print "bct_alt:chart: writing the data to a file"
f = open(filename,'w')
f.write(tmpl)
f.close()
if self.cache_chart == True:
print "bct_alt:chart: caching html chart:",filename
self.cache.set(filename,tmpl)
return
def test():
te = trade_engine()
#set the trade engine class vars
te.shares = 0.1
te.wll = 242
te.wls = 1
te.buy_wait = 0
te.markup = 0.01
te.stop_loss = 0.128
te.stop_age = 2976
te.macd_buy_trip = -0.02
te.min_i_neg = 2
te.min_i_pos = 0
te.buy_wait_after_stop_loss = 0
for row in d[1:]:
r = row.split(',')[1] #last
t = row.split(',')[0] #time
te.input(float(t),float(r))
return te
if __name__ == "__main__":
import pdb
__appversion__ = "0.02a"
print "Bitcoin trade simulator profiler v%s"%__appversion__
print " -- this is a test script to profile the performance of bct.py"
print " -- the trade results should be ignored as the trade strategy inputs"
print " are designed to stress the module with many trade positions"
print ""
print "Profiling bct...(This is going to take a while)"
#open the history file
f = open("./datafeed/bcfeed_mtgoxUSD_1min.csv",'r')
d = f.readlines()
f.close()
import hotshot,hotshot.stats
prof = hotshot.Profile("bct.prof")
te = prof.runcall(test)
prof.close()
stats = hotshot.stats.load("bct.prof")
stats.strip_dirs()
stats.sort_stats('time','calls')
stats.print_stats(20)
print "Score:",te.score()
print "Closing Balance:",te.balance
print "Transaction Count: ",len(te.positions)
    #Commented out the following reports -- they generate very large files and are of limited use for this test script.
#print "Generating reports..."
#te.log_transactions('./report/profile_transactions.csv')
#te.log_orders('./report/profile_orders.csv')
#te.chart("./report/chart.templ","./report/chart_profile.html")
print "Done."
|
stahn/ga-bitbot
|
bct_active.py
|
Python
|
gpl-3.0
| 39,122
|
[
"Brian"
] |
de35590bb4baec8f4feb45b43cf8288278fb467e8d3d9ae579492327558aa8bc
|
#!/usr/bin/env python
"""Writer for FASTA sequence format"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from skbio.alignment import Alignment
from skbio.sequence import BiologicalSequence
def fasta_from_sequences(seqs, make_seqlabel=None, line_wrap=None):
"""Returns a FASTA string given a list of sequence objects.
A ``sequence.Label`` attribute takes precedence over ``sequence.Name``.
Parameters
----------
seqs : list
seqs can be a list of sequence objects or strings.
make_seqlabel : function, optional
callback function that takes the seq object and returns a label
        ``str``. If ``None`` is passed, the following attributes will be tried
        in this order and the first one that exists will be used:
``id``, ``Label`` or ``Name``. In any other case an integer
with the position of the sequence object will be used.
line_wrap : int, optional
        an integer for the maximum line width; if ``None`` is passed the
        full sequence will be written on a single line.
Returns
-------
str
FASTA formatted string composed of the objects passed in via `seqs`.
See Also
--------
skbio.parse.sequences.parse_fasta
Examples
--------
Formatting a list of sequence objects
>>> from skbio.format.sequences import fasta_from_sequences
>>> from skbio.sequence import DNASequence
>>> seqs = [DNASequence('ACTCGAGATC', 'seq1'),
... DNASequence('GGCCT', 'seq2')]
>>> print fasta_from_sequences(seqs)
>seq1
ACTCGAGATC
>seq2
GGCCT
"""
fasta_list = []
for i, seq in enumerate(seqs):
# Check if it has a label, or one is to be created
label = str(i)
if make_seqlabel is not None:
label = make_seqlabel(seq)
elif hasattr(seq, 'id') and seq.id:
label = seq.id
elif hasattr(seq, 'Label') and seq.Label:
label = seq.Label
elif hasattr(seq, 'Name') and seq.Name:
label = seq.Name
# wrap sequence lines
seq_str = str(seq)
if line_wrap is not None:
numlines, remainder = divmod(len(seq_str), line_wrap)
if remainder:
numlines += 1
body = [seq_str[j * line_wrap:(j + 1) * line_wrap]
for j in range(numlines)]
else:
body = [seq_str]
fasta_list.append('>' + label)
fasta_list += body
return '\n'.join(fasta_list)
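# Illustrative example (added comment, not part of the original module), assuming
# the same DNASequence import used in the docstring above, showing ``line_wrap``:
#   >>> print fasta_from_sequences([DNASequence('ACTCGAGATC', 'seq1')], line_wrap=4)
#   >seq1
#   ACTC
#   GAGA
#   TC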
def fasta_from_alignment(aln, make_seqlabel=None, line_wrap=None, sort=True):
"""Returns a FASTA string given an alignment object
Parameters
----------
aln : Alignment, dict
alignment or dictionary where the keys are the sequence ids and
the values are the sequences themselves.
make_seqlabel : function, optional
callback function that takes the seq object and returns a label
        ``str``. If ``None`` is passed, the following attributes will be tried
        in this order and the first one that exists will be used:
``id``, ``Label`` or ``Name``. In any other case an integer
with the position of the sequence object will be used.
line_wrap : int, optional
        an integer for the maximum line width; if ``None`` is passed the
        full sequence will be written on a single line.
sort : bool, optional
Whether or not the sequences should be sorted by their sequence
id, default value is ``True``.
Returns
-------
str
FASTA formatted string composed of the objects passed in via `seqs`.
See Also
--------
skbio.parse.sequences.parse_fasta
skbio.alignment.Alignment
Examples
--------
Formatting a sequence alignment object into a FASTA file.
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> from skbio.format.sequences import fasta_from_alignment
>>> seqs = [DNA("ACC--G-GGTA..", id="seq1"),
... DNA("TCC--G-GGCA..", id="seqs2")]
>>> a1 = Alignment(seqs)
>>> print fasta_from_alignment(a1)
>seq1
ACC--G-GGTA..
>seqs2
TCC--G-GGCA..
"""
# check if it's an Alignment object or a dictionary
if isinstance(aln, Alignment):
order = aln.ids()
else:
order = aln.keys()
if sort:
order = sorted(order)
ordered_seqs = []
for label in order:
seq = aln[label]
if isinstance(seq, str):
seq = BiologicalSequence(seq, label)
ordered_seqs.append(seq)
return fasta_from_sequences(ordered_seqs, make_seqlabel=make_seqlabel,
line_wrap=line_wrap)
|
JWDebelius/scikit-bio
|
skbio/format/sequences/fasta.py
|
Python
|
bsd-3-clause
| 4,987
|
[
"scikit-bio"
] |
2916660f294c0d3c984b2264742fb3025ba21cb564002799b90b39ae88401e55
|
"""
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
   more realistic high-dimensional problems.
"""
print(__doc__)
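# Reference note (added comment, not part of the original example): writing the
# signed margin as m = y * f(x), the two losses contrasted in the docstring are
#   hinge loss (SVC):               L(m) = max(0, 1 - m)
#   squared hinge loss (LinearSVC): L(m) = max(0, 1 - m) ** 2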
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
|
RPGOne/Skynet
|
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/svm/plot_iris.py
|
Python
|
bsd-3-clause
| 3,251
|
[
"Gaussian"
] |
f2fa4ce1edd6c255cc66907a6cb2a7917c228f9f54c4294e26d738f0e3732037
|
import matplotlib.pyplot as plt
import netCDF4
import os
import pandas
def read_netcdf(nc_full_name,variable_list=[]):
"""
Purpose:
     Read an OzFlux netCDF file and return the data in a Pandas data frame.
Usage:
df = qcio.nc_read_todf(nc_full_name,variable_list=variable_list)
where nc_full_name is the full name of the netCDF file.
variable_list (optional) is an list of variables to be read
If variable_list is not passed, all variables in the netCDF are returned.
Side effects:
Returns a Pandas data frame containing the data indexed by datetime and
a dictionary containing the global and variable attributes.
Author: PRI using code originally written by Ian McHugh
Date: June 2015
"""
# check to see if the file exists
if "http" not in nc_full_name.lower():
if not os.path.exists(nc_full_name):
raise Exception("read_netcdf: input file "+nc_full_name+" not found")
# read the netCDF file
nc_file = netCDF4.Dataset(nc_full_name,"r")
# create a dictionary to hold the global and variable attributes
attr = {}
attr["global"] = {}
attr["variable"] = {}
# now deal with the global attributes
gattrlist = nc_file.ncattrs()
if len(gattrlist)!=0:
for item in gattrlist:
attr["global"][item] = getattr(nc_file,item)
    # get a list of Python datetimes from the netCDF time variable
    time = nc_file.variables["time"][:]
time_units = getattr(nc_file.variables["time"],"units")
dates_list = list(netCDF4.num2date(time,time_units))
# get a list of variables to read from the netCDF file
# was a variable list passed in as variable_list?
if len(variable_list)==0:
# if not, get the variable list from the netCDF file contents
variable_list = nc_file.variables.keys()
else:
# if so, add the QC flags to the list entered as an argument
flag_list = []
for item in variable_list: flag_list.append(item+"_QCFlag")
variable_list = variable_list+flag_list
# read the variables and attributes from the netCDF file
# create a dictionary to hold the data
data = {}
# loop over the variables to be read
for item in variable_list:
# get the number of dimensions
# variables in OzFlux netCDF files can have 1 (time) or 3 dimensions (time,latitude,longitude)
ndims = len(nc_file.variables[item].shape)
if ndims==1:
            data[item] = nc_file.variables[item][:]
elif ndims==3:
# drop the degenerate dimensions (latitude and longitude)
data[item] = nc_file.variables[item][:,0,0]
else:
            raise Exception("unrecognised number of dimensions for variable "+str(item))
# get the variable attributes
vattrlist = nc_file.variables[item].ncattrs()
if len(vattrlist)!=0:
attr["variable"][item] = {}
for vattr in vattrlist:
attr["variable"][item][vattr] = getattr(nc_file.variables[item],vattr)
nc_file.close()
# convert the dictionary to a Pandas data frame
df = pandas.DataFrame(data,index=dates_list)
return df,attr
# read the variables from the local netCDF file
nc_full_name = "../../Sites/Whroo/Data/Processed/all/Whroo_2011_to_2014_L6.nc"
variable_list = ['Fsd','Ta','VPD','NEE_SOLO']
print "reading local netCDF file"
df,attr = read_netcdf(nc_full_name,variable_list=variable_list)
# plot the variables
print "plotting local netCDF file"
fig = plt.figure(1)
plt.figtext(0.5,0.95,"Local file",horizontalalignment='center')
ax1 = plt.subplot(411)
ax1.plot(df.index.values,df['Fsd'])
ax2 = plt.subplot(412,sharex=ax1)
ax2.plot(df.index.values,df['Ta'])
ax3 = plt.subplot(413,sharex=ax1)
ax3.plot(df.index.values,df['VPD'])
ax4 = plt.subplot(414,sharex=ax1)
ax4.plot(df.index.values,df['NEE_SOLO'])
plt.show()
# read the variables from the remote netCDF file
nc_dap_name = "http://dap.ozflux.org.au/thredds/dodsC/ozflux/sites/Whroo/L6/Whroo_2011_to_2014_L6.nc"
variable_list = ['Fsd','Ta','VPD','NEE_SOLO']
print "reading remote netCDF file"
df,attr = read_netcdf(nc_dap_name,variable_list=variable_list)
# plot the variables
print "plotting remote netCDF file"
fig = plt.figure(2)
plt.figtext(0.5,0.95,"OPeNDAP file",horizontalalignment='center')
ax1 = plt.subplot(411)
ax1.plot(df.index.values,df['Fsd'])
ax2 = plt.subplot(412,sharex=ax1)
ax2.plot(df.index.values,df['Ta'])
ax3 = plt.subplot(413,sharex=ax1)
ax3.plot(df.index.values,df['VPD'])
ax4 = plt.subplot(414,sharex=ax1)
ax4.plot(df.index.values,df['NEE_SOLO'])
plt.show()
|
OzFlux/OzFluxQC
|
utilities/read_example.py
|
Python
|
gpl-3.0
| 4,619
|
[
"NetCDF"
] |
5f09de4109ba1a7d46fee6702a0fe264359ee51fb0ef4b90f8e21f073af1a377
|
############################################################################
# Copyright (c) 2015 Saint Petersburg State University
# All Rights Reserved
# See file LICENSE for details.
############################################################################
__author__ = 'anton'
import getopt
import os
import sys
import options_storage
class Options:
def set_default_options(self):
self.threads = 8
self.dataset_file = None
self.input_dirs = None
self.print_dataset = False
self.print_commands = False
self.output_dir = None
self.command_list = None
self.spades_options = ""
self.continue_launch = False
self.index = ""
self.reference = ""
self.mode = "run_truspades"
self.possible_modes = ["run_truspades", "generate_dataset", "construct_subreferences"]
self.test = False
self.clean = False
def __init__(self, argv, bin, home, version):
if len(argv) == 1:
print_usage_and_exit(1, version)
long_params = "test clean help-hidden construct-dataset reference= reference-index= do= continue " \
"threads= help version dataset= input-dir= additional-options=".split(" ")
short_params = "o:t:hv"
self.set_default_options()
self.bin = bin
self.home = home
self.version = version
try:
options_list, tmp = getopt.gnu_getopt(argv[1:], short_params, long_params)
if len(tmp) != 0:
print_usage_and_exit(1, self.version)
except getopt.GetoptError:
_, exc, _ = sys.exc_info()
sys.stderr.write(str(exc) + "\n")
print_usage_and_exit(1, self.version)
for (key, value) in options_list:
if key == "--version" or key == "-v":
print_version_and_exit(self.version)
if key == "--help" or key == "-h":
print_usage_and_exit(1, self.version)
elif key == "--test":
dir = os.path.abspath("spades_test") + "_truspades"
self.output_dir = dir
self.input_dirs = [os.path.join(self.home, "test_dataset_truspades")]
self.test = True
elif key == "--do":
self.mode = value
elif key == "--construct-dataset":
self.mode = "generate_dataset"
elif key == "--dataset":
self.dataset_file = value
elif key == "--input-dir":
if self.input_dirs is None:
self.input_dirs = []
self.input_dirs.append(value)
elif key == "--run-truspades":
self.mode = "run_truspades"
elif key == "--reference-index":
self.index = value
elif key == "--reference":
self.reference = value
elif key == "--continue":
self.continue_launch = True
elif key == "--additional-options":
self.spades_options = value
elif key == "-o":
self.output_dir = value
elif key == "--threads" or key == "-t":
self.threads = int(value)
elif key == "--clean":
self.clean = True
elif key == "--help-hidden":
print_usage_and_exit(0, self.version, show_hidden=True)
if not self.mode in self.possible_modes:
sys.stderr.write("Error: --do parameter can only have one of the following values: " + ", ".join(self.possible_modes) + "\n")
print_usage_and_exit(1, self.version)
if None == self.output_dir or os.path.isfile(self.output_dir):
sys.stderr.write("Error: Please provide output directory\n")
print_usage_and_exit(1, self.version)
if self.continue_launch:
return
cnt = len([option for option in [self.dataset_file, self.input_dirs, self.command_list] if option != None])
if cnt != 1:
sys.stderr.write("Error: exactly one of dataset-file and input-dir must be specified\n")
print_usage_and_exit(1, self.version)
if self.mode == "construct_subreferences":
if self.index == "":
sys.stderr.write("Error: Please provide reference index for BWA")
print_usage_and_exit(1, self.version)
if self.reference == "":
sys.stderr.write("Error: Please provide reference for subreference construction")
print_usage_and_exit(1, self.version)
def print_usage_and_exit(code, version, show_hidden=False):
sys.stderr.write("SPAdes genome assembler v" + str(version) + " [truSPAdes mode]\n\n")
sys.stderr.write("Usage: " + str(sys.argv[0]) + " [options] -o <output_dir>" + "\n")
sys.stderr.write("" + "\n")
sys.stderr.write("Basic options:" + "\n")
sys.stderr.write("-h/--help\t\t\tprints this usage message" + "\n")
sys.stderr.write("-v/--version\t\t\tprints version" + "\n")
sys.stderr.write("--test\t\t\t\trun truSPAdes on toy dataset" + "\n")
sys.stderr.write("-o\t\t<output_dir>\tdirectory to store all the resulting files (required)" + "\n")
sys.stderr.write("-t/--threads\t<int>\t\tnumber of threads" + "\n")
sys.stderr.write("--continue\t\t\tcontinue interrupted launch" + "\n")
sys.stderr.write("--construct-dataset\t\tparse dataset from input folder" + "\n")
sys.stderr.write("" + "\n")
sys.stderr.write("Input options:" + "\n")
sys.stderr.write("--input-dir\t<directory>\tdirectory with input data. Note that the directory should contain only files with reads. This option can be used several times to provide several input directories." + "\n")
sys.stderr.write("--dataset\t<file>\t\tfile with dataset description" + "\n")
if show_hidden:
pass
#ToDo
# sys.stderr.write("" + "\n")
# sys.stderr.write("Output options:" + "\n")
# sys.stderr.write("--print-dataset\tprints file with dataset generated after analysis of input directory contents" + "\n")
# sys.stderr.write("--print-commands\tprints file with truspades commands that would assemble barcodes from dataset" + "\n")
# sys.stderr.write("--run-truspades\truns truSPAdes on all barcodes" + "\n")
sys.stderr.flush()
sys.exit(code)
def print_version_and_exit(version):
options_storage.version(version, mode="tru")
sys.exit(0)
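# Example invocations (added comment; illustrative only, paths are hypothetical
# and the wrapper script name may differ in your installation), based on the
# usage text printed above:
#   truspades.py --construct-dataset --input-dir /path/to/barcoded_reads -o truspades_out -t 16
#   truspades.py --continue -o truspades_out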
|
INNUENDOWEB/INNUca
|
src/SPAdes-3.9.0-Linux/share/spades/spades_pipeline/truspades/launch_options.py
|
Python
|
gpl-3.0
| 6,476
|
[
"BWA"
] |
b7a312a17b24fe6249a5ba9ac3f4e77978e19af8e84dc3d9cc781b75f12725e7
|
#!/usr/bin/env python2
"""
Creates various combinations of latitude and longitude lines
to demonstrate different grid types for globes. (This requires
the MayaVi visualization library).
The last scene crudely shows what SpaceX's satellite constellation
*may* look like. Altitudes are to scale, but satellite bodies are
exaggerated for visibility. Data was found here:
https://cdn3.vox-cdn.com/uploads/chorus_asset/file/8174403/SpaceX_Application_-.0.pdf
"""
from __future__ import division
import numpy as np; npl = np.linalg
import mayavi.mlab as maya
#####################################################################
def R(roll, pitch, yaw):
"""
Generates the 3D rotation matrix that corresponds to the given Euler angles.
The Euler angles are accepted in DEGREES. Sequence of application is extrinsic x-y-z.
"""
ai, aj, ak = np.deg2rad((roll, pitch, yaw))
si, sj, sk = np.sin(ai), np.sin(aj), np.sin(ak)
ci, cj, ck = np.cos(ai), np.cos(aj), np.cos(ak)
cc, cs = ci*ck, ci*sk
sc, ss = si*ck, si*sk
return np.array([[cj*ck, sj*sc-cs, sj*cc+ss],
[cj*sk, sj*ss+cc, sj*cs-sc],
[ -sj, cj*si, cj*ci]])
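# Added note (not in the original file): the matrix above equals the product
# Rz(yaw) @ Ry(pitch) @ Rx(roll), i.e. a rotation about the fixed x axis, then the
# fixed y axis, then the fixed z axis -- the "extrinsic x-y-z" sequence mentioned
# in the docstring.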
def grid_lines(form, n, R=np.eye(3), res=0.01):
"""
Returns n evenly-spaced unit-sphere grid-lines with orientation R and plotting resolution res.
The form is either 'lat' (latitude) or 'lon' (longitude).
R is a 3D rotation matrix (use the R function above to generate it from Euler angles).
The grid lines are returned as an array of shape (n, 3, 2*pi/res).
Example: grid_lines(...)[3, 1, 4] is the third line's y-coordinate of its fourth point.
"""
U = np.linspace(-np.pi/2, np.pi/2, n)
V = np.arange(-np.pi, np.pi+res, res)
lines = np.zeros((n, 3, len(V)))
for i, u in enumerate(U):
for j, v in enumerate(V):
if form == 'lat': lines[i, :, j] = np.cos(u)*np.cos(v), np.cos(u)*np.sin(v), np.sin(u)
elif form == 'lon': lines[i, :, j] = np.cos(v)*np.cos(u), np.cos(v)*np.sin(u), np.sin(v)
else: raise ValueError("Invalid form. Choose 'lat' or 'lon'.")
lines[i] = R.dot(lines[i])
return lines
class Sphere(object):
"""
Generates points for a sphere at the origin. Specify the radius and
the resolution (number of steps from 0 to 2*pi) in the __init__.
Call mysphere.draw(color) to add the sphere to the current maya figure.
"""
def __init__(self, radius=1.0, res=40):
self.u, self.v = np.mgrid[0:2*np.pi:res*1j, 0:np.pi:res*1j]
self.x = radius*np.cos(self.u)*np.sin(self.v)
self.y = radius*np.sin(self.u)*np.sin(self.v)
self.z = radius*np.cos(self.v)
self.draw = lambda color: maya.mesh(self.x, self.y, self.z, color=color)
#####################################################################
# Common parameters
n = 10 # number of grid_lines
lr = 0.01 # tube radius of the lines themselves
red, green, blue = map(tuple, np.eye(3))
black, gray, white = (0,)*3, (0.5,)*3, (1,)*3
sphere = Sphere()
fig1 = maya.figure(1, bgcolor=white, fgcolor=white)
for x, y, z in grid_lines('lat', n): maya.plot3d(x, y, z, color=red, tube_radius=lr)
for x, y, z in grid_lines('lon', n): maya.plot3d(x, y, z, color=blue, tube_radius=lr)
maya.title("lat-lon", color=black, height=0.875, size=0.5)
sphere.draw(gray)
fig2 = maya.figure(2, bgcolor=white, fgcolor=white)
for x, y, z in grid_lines('lon', n): maya.plot3d(x, y, z, color=red, tube_radius=lr)
for x, y, z in grid_lines('lon', n, R(90, 0, 0)): maya.plot3d(x, y, z, color=blue, tube_radius=lr)
maya.title("lon-lon", color=black, height=0.875, size=0.5)
sphere.draw(gray)
fig3 = maya.figure(3, bgcolor=white, fgcolor=white)
for x, y, z in grid_lines('lat', n): maya.plot3d(x, y, z, color=red, tube_radius=lr)
for x, y, z in grid_lines('lat', n, R(90, 0, 0)): maya.plot3d(x, y, z, color=blue, tube_radius=lr)
for x, y, z in grid_lines('lat', 3, R(0, 90, 0)): maya.plot3d(x, y, z, color=green, tube_radius=lr)
maya.title("lat-lat-orth", color=black, height=0.875, size=0.5)
sphere.draw(gray)
fig4 = maya.figure(4, bgcolor=white, fgcolor=white)
for x, y, z in grid_lines('lat', n): maya.plot3d(x, y, z, color=red, tube_radius=lr)
for x, y, z in grid_lines('lat', n, R(90, 0, 0)): maya.plot3d(x, y, z, color=blue, tube_radius=lr)
for x, y, z in grid_lines('lat', n-1, R(0, 90, 0)): maya.plot3d(x, y, z, color=green, tube_radius=lr)
maya.title("lat-lat-lat", color=black, height=0.875, size=0.5)
sphere.draw(gray)
fig5 = maya.figure(5, bgcolor=white, fgcolor=white)
for x, y, z in grid_lines('lat', n): maya.plot3d(x, y, z, color=black, tube_radius=lr/5)
for x, y, z in grid_lines('lon', n): maya.plot3d(x, y, z, color=black, tube_radius=lr/5)
for color, incl, dens, alt, num in zip([red, green, blue, (1, 0, 1)], [53, 74, 81, 70], [32, 8, 5, 6], [1110, 1130, 1275, 1325], [50, 50, 75, 75]):
for i in np.linspace(0, 360, dens):
for x, y, z in (1+alt/6371)*grid_lines('lon', 1, R(0, 90-incl, i)):
maya.plot3d(x, y, z, color=color, tube_radius=lr/5)
maya.points3d(x[::len(x)//num], y[::len(x)//num], z[::len(x)//num], scale_factor=0.02, color=color)
maya.title("SpaceX", color=black, height=0.875, size=0.5)
sphere.draw(gray)
maya.show()
|
jnez71/demos
|
geometry/globe_grids.py
|
Python
|
mit
| 5,276
|
[
"Mayavi"
] |
4e870fb93df8c2755bdfcd0797bfbd546f803e03e9b994946a6fd128a5e80264
|
#!/usr/bin/env python
import roslib,rospy,sys,cv2,time
import numpy as np
roslib.load_manifest('lane_follower')
from std_msgs.msg import Int32
from sensor_msgs.msg import CompressedImage, Image
from cv_bridge import CvBridge, CvBridgeError
bridge = CvBridge()
pub = rospy.Publisher('lane_detection', Int32, queue_size=10) #ros-lane-detection
pub_image = rospy.Publisher('lane_detection_image',Image,queue_size=1)
def callback(data):
# convert image to cv2 standard format
img = bridge.compressed_imgmsg_to_cv2(data)
# start time
start_time = cv2.getTickCount()
    # Median filter to remove noise
img = cv2.medianBlur(img,5)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# print img.shape = (200,350,3)
rows,cols,channels = img.shape
# ROI
roi_mask = np.zeros(img.shape,dtype=np.uint8)
roi_mask[10:rows,0:cols] = 255
street = cv2.bitwise_and(img,roi_mask)
stop_roi_mask = np.zeros(gray.shape,dtype=np.uint8)
stop_roi_mask[150:rows,150:250] = 255
right_roi_mask = np.zeros(gray.shape,dtype=np.uint8)
right_roi_mask[150:rows,200:360] = 255
    right_roi = cv2.bitwise_and(img,img,mask=right_roi_mask)
left_roi_mask = np.zeros(gray.shape,dtype=np.uint8)
left_roi_mask[150:rows,0:200] = 255
    left_roi = cv2.bitwise_and(img,img,mask=left_roi_mask)
# define range of color in HSV
hsv = cv2.cvtColor(street,cv2.COLOR_BGR2HSV)
sensitivity = 160 # range of sensitivity=[90,150]
lower_white = np.array([0,0,255-sensitivity])
upper_white = np.array([255,sensitivity,255])
white_mask = cv2.inRange(hsv,lower_white,upper_white)
white_mask = cv2.erode(white_mask, None, iterations=2)
white_mask = cv2.dilate(white_mask, None, iterations=2)
lower_yellow = np.array([0,100,100]) #0,100,100
upper_yellow = np.array([40,255,255]) #30,255,255
yellow_mask = cv2.inRange(hsv,lower_yellow,upper_yellow)
yellow_mask = cv2.erode(yellow_mask, None, iterations=2)
yellow_mask = cv2.dilate(yellow_mask, None, iterations=2)
# mask AND original img
whitehsvthresh = cv2.bitwise_and(right_roi,right_roi,mask=white_mask)
yellowhsvthresh = cv2.bitwise_and(street,street,mask=yellow_mask)
# Canny Edge Detection
right_edges = cv2.Canny(whitehsvthresh,100,200)
left_edges = cv2.Canny(yellowhsvthresh,100,200)
right_edges = cv2.bitwise_and(right_edges,right_roi_mask)
left_edges = cv2.bitwise_and(left_edges,left_roi_mask)
# Standard Hough Transform
right_lines = cv2.HoughLines(right_edges,0.8,np.pi/180,35)
left_lines = cv2.HoughLines(left_edges,0.8,np.pi/180,30)
xm = cols/2
ym = rows
# Draw right lane
x = []
i = 0
if right_lines is not None:
right_lines = np.array(right_lines[0])
for rho, theta in right_lines:
a=np.cos(theta)
b=np.sin(theta)
x0,y0=a*rho,b*rho
y3 = 140
x3 = int(x0+((y0-y3)*np.sin(theta)/np.cos(theta)))
x.insert(i,x3)
            i += 1
pt1=(int(x0+1000*(-b)),int(y0+1000*(a)))
pt2=(int(x0-1000*(-b)),int(y0-1000*(a)))
cv2.line(img,pt1,pt2,(255,0,0),2)
if len(x) != 0:
xmin = x[0]
for k in range(0,len(x)):
if x[k] < xmin and x[k] > 0:
xmin = x[k]
kr = int(np.sqrt(((xmin-xm)*(xmin-xm))+((y3-ym)*(y3-ym))))
else:
kr = 0
xmin = 0
# Draw left lane
x = []
i = 0
if left_lines is not None:
left_lines = np.array(left_lines[0])
for rho, theta in left_lines:
a=np.cos(theta)
b=np.sin(theta)
x0,y0=a*rho,b*rho
y3 = 140
x3 = int(x0+((y0-y3)*np.sin(theta)/np.cos(theta)))
x.insert(i,x3)
            i += 1
pt1=(int(x0+1000*(-b)),int(y0+1000*(a)))
pt2=(int(x0-1000*(-b)),int(y0-1000*(a)))
cv2.line(img,pt1,pt2,(0,255,0),2)
if len(x) != 0:
xmax = x[0]
for k in range(0,len(x)):
if x[k] > xmax and x[k]<cols:
xmax = x[k]
kl = int(np.sqrt(((xmax-xm)*(xmax-xm))+((y3-ym)*(y3-ym))))
else:
kl = 0
xmax = 0
error = kr - kl
#end time
end_time = cv2.getTickCount()
time_count= (end_time - start_time) / cv2.getTickFrequency()
# rospy.loginfo(time_count)
if right_lines is not None and left_lines is not None:
rospy.loginfo(error)
if error > 150:
error = 150
elif error < -150:
error = -150
message = error
elif left_lines is not None and right_lines is None:
rospy.loginfo("Turn Right")
rospy.loginfo(kl)
message = 152 #turn right
elif left_lines is None and right_lines is not None:
rospy.loginfo("Turn Left")
        message = 153 #turn left
elif left_lines is None and right_lines is None:
rospy.loginfo("No line")
message = 155 #no line found
else:
message = 155 #no line found
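    # Summary of the values published on 'lane_detection' (added comment, derived
    # from the branches above): -150..150 is the clamped steering error kr - kl,
    # 152 means "turn right" (only the yellow/left lane was found), 153 means
    # "turn left" (only the white/right lane was found), 155 means no lane found.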
pub.publish(message)
image = bridge.cv2_to_imgmsg(img, "bgr8")
pub_image.publish(image)
def lane_detection():
rospy.init_node('lane-detection',anonymous=True)
rospy.Subscriber("/raspicam_node/image/compressed",CompressedImage,callback,queue_size=1,buff_size=2**24)
try:
        rospy.loginfo("Entering ROS Spin")
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
if __name__ == '__main__':
try:
lane_detection()
except rospy.ROSInterruptException:
pass
|
isarlab-department-engineering/ros_dt_lane_follower
|
src/lane_detection.py
|
Python
|
bsd-3-clause
| 5,227
|
[
"Gaussian"
] |
603e4afb4f01e9701d369868aab8150378d9c68883908f67dd0996f3f76d62b0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""rna_clanstix - a tool for visualizing RNA 3D structures based on pairwise structural similarity with Clans.
We hacked Clans thus instead of BLAST-based distances between sequences, you can analyze distances between structures described as p-values of rmsd (based on the method from the Dokholyan lab.)
Quickref::
rna_clanstix.py --groups-auto 10 --color-by-homolog --shape-by-source thf_ref_mapping_pk_refX.txt input2.clans
Running Clans:
To run CLANS you need to have Java 1.4 or better installed (java can be downloaded HERE). For full functionality you will also need the NCBI BLAST,PSI-BLAST and formatdb executables (NCBI). For command line parameters and basic help please refer to the README file.
(source: http://www.eb.tuebingen.mpg.de/research/departments/protein-evolution/software/clans.html)
.. image:: ../../rna_tools/tools/clanstix/doc/yndSrLTb7l.gif
The RMSDs between structures are converted into p-values based on the method from the Dokholyan lab or some hacky way developed by mmagnus .
Color groups
---------------------------------------
You can color your groups:
.. image:: ../../rna_tools/tools/clanstix/doc/rna_clanstix.png
To get colors, run a cmd like this::
    rna_clanstix.py rnapz17_matrix_farfar_HelSeedCst.txt --groups 20:seq1+20+20+20+20+20+20:seq10
where the ``+`` sign separates groups. Each group has to have a number of structures and, optionally, a name, e.g. ``20:seq1`` (use ``:`` as the separator). If a provided name is ``native`` then this group will be shown as stars.
Get inspiration for more colors (http://www.rapidtables.com/web/color/RGB_Color.htm)
How to use ClanstixRNA?
----------------------------------------
1. Get a matrix of distances, save it as e.g. matrix.txt (see Comment below)
2. run ClanstixRNA on this matrix to get an input file to Clans (e.g. clans_rna.txt)::
rna_clanstix.py test_data/matrix.txt # clans.input will be created by default
3. open CLANS and click File -> Load run and load clans_run.txt
4. You're done! :-)
Comment: To get this matrix you can use for example another tool from the rna-pdb-tools packages::
rna_calc_rmsd_all_vs_all.py -i rp18 -o rp18_rmsd.csv
    rna_clanstix.py --groups 1:native+5:3dRNA+
5:Chen+3:Dokh+5:Feng+5:LeeASModel+
5:Lee+5:RNAComposer+10:RW3D+5:Rhiju+
1:YagoubAli+3:SimRNA rp18_rmsd.csv clans.in
    rna_clanstix.py --groups 100+100+100+100+100+100+100+100+100+100+1:native rp18_rmsd.csv
where ``rp18`` is a folder with structure and ``rp18_rmsd.csv`` is a matrix of all-vs-all rmsds.
.. image:: ../../rna_tools/tools/clanstix/doc/rp18_clanstix.png
Hajdin, C. E., Ding, F., Dokholyan, N. V, & Weeks, K. M. (2010). On the significance of an RNA tertiary structure prediction. RNA (New York, N.Y.), 16(7), 1340–9. doi:10.1261/rna.1837410
An output of this tool can be viewed using CLANS.
Frickey, T., & Lupas, A. (2004). CLANS: a Java application for visualizing protein families based on pairwise similarity. Bioinformatics (Oxford, England), 20(18), 3702–4. doi:10.1093/bioinformatics/bth444
"""
from __future__ import print_function
import argparse
import rna_tools.tools.rna_prediction_significance.rna_prediction_significance as pv
import numpy as np
import math
import logging
import time
logging.basicConfig(level=logging.INFO,
format='%(message)s',
datefmt='%m-%d %H:%M',
filename='rna_clanstix.log',
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
log = logging.getLogger()
log.setLevel(logging.INFO)
class RNAStructClans:
"""Clans run.
Usage::
>>> f = open('matrix.txt')
>>> ids = f.readline().replace('#','').split()
>>> c = RNAStructClans(n=len(ids)) # 200?
>>> c.add_ids(ids)
>>> c.dist_from_matrix(f)
>>> print(c.txt)
"""
def __init__(self, n=10, dotsize=10):
self.n = n
self.comment = ''
#cluster2d=false
self.txt = """sequences=%i
<param>
maxmove=0.1
pval=%s
usescval=false
complexatt=true
cooling=1.0
currcool=1.0
attfactor=10.0
attvalpow=1
repfactor=10.0
repvalpow=1
dampening=1.0
minattract=1.0
cluster2d=true
blastpath=''
formatdbpath=''
showinfo=false
zoom=0.9
dotsize=%s
ovalsize=10
groupsize=4
usefoldchange=false
avgfoldchange=false
colorcutoffs=0.0;0.1;0.2;0.3;0.4;0.5;0.6;0.7;0.8;0.9;
colorarr=(230;230;230):(207;207;207):(184;184;184):(161;161;161):(138;138;138):(115;115;115):(92;92;92):(69;69;69):(46;46;46):(23;23;23):
</param>""" % (n, args.pvalue, str(dotsize))
def add_ids(self, ids):
t = '\n<seq>\n'
for i in ids:
t += '>' + i + '\n'
t += 'X' + '\n'
t += '</seq>\n'
if len(ids) != self.n:
# print 'n != ids'
raise Exception('n != ids')
self.txt += t
def dist_from_matrix(self, rmsds, matrix=0, use_pv=False, use_input_values=False, dont_calc=False, debug=False):
if dont_calc:
print('Everything but the dists are generated. Use it to edit the original clans input file.')
return # for some hardcore debugging ;-)
t = '\n<hsp>\n'
c = 0
c2 = 0
if debug: print(rmsds)
for row in rmsds:
for rmsd in row:
if c != c2:
if use_input_values:
dist = float(str(rmsd).replace('e', 'E'))
elif use_pv:
dist = pv.get_p_value(rmsd, 1 * 38)[0] # r.get_rmsd_to(r2), 3)
else:
                        # e.g. with floor(matrix.max()) == 10: rmsd 1 -> 1.0E-9, rmsd 10 -> 1.0E-0 (smaller RMSD -> smaller pseudo p-value)
dist = '1.0E-' + str(int(math.floor(matrix.max()) - int(float(rmsd))))
t += str(c) + ' ' + str(c2) + ':' + str(dist) + '\n'
c2 += 1
c2 = 0
c += 1
t += '</hsp>\n'
if not use_input_values:
max = math.ceil(matrix.max())
min = matrix[matrix>0].min()
self.comment = '# max: %f min (non-zero): %f\n' % (max, min)
self.comment += '# 1/4 ' + str((max - min) / 4) + ' ' + str(round((max - min) / 4, 0)) + '\n'
self.comment += '# 1/2 ' + str((max - min) / 2) + ' ' + str(round((max - min) / 2, 0)) + '\n'
self.comment += '# 1/3 ' + str(((max - min) / 4 ) * 3 ) + ' ' + str(round(((max - min) / 4) * 3, 0)) + '\n'
for i in range(1,20):
self.comment += '# connected points with RMSD lower than %iA 1.0E-%i\n' % (i, math.ceil(matrix.max()) - i)
            # i.e. the pseudo p-value steps with the integer part of the RMSD: with
            # floor(matrix.max()) == 11, rmsd < 1 maps to 1.0E-11, 1 <= rmsd < 2 to
            # 1.0E-10, 2 <= rmsd < 3 to 1.0E-9, and so on.
self.txt += t
print(t)
def dist_from_matrix_mp(self, output_pmatrix_fn, max, min, lines, pmat=False, use_pv=False, use_input_values=False, debug=False):
if debug:
print('Everything but the dists are generated. Use it to edit the original clans input file.')
return # for some hardcore debugging ;-)
t = '\n<hsp>\n'
myp = ''
c = 0
c2 = 0
for l in lines:
for rmsd in l:
if c != c2:
if use_input_values:
dist = rmsd.replace('e', 'E')
if use_pv:
dist = pv.get_p_value(rmsd, 1 * 38)[0] # r.get_rmsd_to(r2), 3)
else:
                        # e.g. with floor(matrix.max()) == 10: rmsd 1 -> 1.0E-9, rmsd 10 -> 1.0E-0 (smaller RMSD -> smaller pseudo p-value)
dist = '1.0E-' + str(int(math.floor(matrix.max()) - int(float(rmsd))))
t += str(c) + ' ' + str(c2) + ':' + str(dist) + '\n'
myp += ' ' + str(dist)
else:
myp += ' ' + '0.0'
c2 += 1
myp += '\n'
c2 = 0
c += 1
t += '</hsp>\n'
if not use_input_values:
max = math.ceil(matrix.max())
min = matrix[matrix>0].min()
self.comment = '# max: %f min (non-zero): %f\n' % (max, min)
self.comment += '# 1/4 ' + str((max - min) / 4) + ' ' + str(round((max - min) / 4, 0)) + '\n'
self.comment += '# 1/2 ' + str((max - min) / 2) + ' ' + str(round((max - min) / 2, 0)) + '\n'
self.comment += '# 1/3 ' + str(((max - min) / 4 ) * 3 ) + ' ' + str(round(((max - min) / 4) * 3, 0)) + '\n'
for i in range(1,20):
self.comment += '# connected points with RMSD lower than %iA 1.0E-%i\n' % (i, math.ceil(matrix.max()) - i)
            # i.e. the pseudo p-value steps with the integer part of the RMSD: with
            # floor(matrix.max()) == 11, rmsd < 1 maps to 1.0E-11, 1 <= rmsd < 2 to
            # 1.0E-10, 2 <= rmsd < 3 to 1.0E-9, and so on.
self.txt += t
if pmat:
with open(output_pmatrix_fn, 'w') as f:
f.write(myp)
return t
def check_symmetric(a, rtol=1e-05, atol=1e-08):
"""
https://stackoverflow.com/questions/42908334/checking-if-a-matrix-is-symmetric-in-numpy
"""
return np.allclose(a, a.T, rtol=rtol, atol=atol)
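# Quick illustrative check (added comment, not in the original file):
#   check_symmetric(np.array([[0.0, 1.2], [1.2, 0.0]]))  -> True
#   check_symmetric(np.array([[0.0, 1.2], [1.3, 0.0]]))  -> False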
def get_parser():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('matrixfn', help="matrix")
parser.add_argument('--groups-auto', help="define into how many groups make automatically", type=int)
parser.add_argument('--color-by-homolog', help="color the same homolog in the same way", action='store_true')
parser.add_argument('--one-target', help="color the same homolog in the same way", action='store_true')
parser.add_argument('--shape-by-source', help="shape points based on source, SimRNA vs Rosetta (Rosetta models have 'min' in name')",
action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--groups-dot-size', type=int, default=8)
    parser.add_argument('--dont-calc', action='store_true', help="A simple and dirty trick: "
                        "generate everything except the main distances from the matrix. "
                        "Useful if you want to re-run the script with different settings, such as "
                        "colors or groups: run the script and then replace the relevant parts "
                        "of the original file with the matrix.")
    parser.add_argument('--groups', help="groups; at the moment up to 7 groups can be handled, e.g. "
                        "--groups 1:native+100:zmp+100:zaa+100:zbaa+100:zcp+100:znc or "
                        "--groups 1:native+100:nats+100:hom1+100:hom2+100:hom3+100:hom4; "
                        "native will be light green, "
                        "zmp will be forest green "
                        "(easy to be changed in the future)")
parser.add_argument('--use-pvalue', action='store_true', help="")
parser.add_argument('--use-input-values', action='store_true', help="")
parser.add_argument('--pvalue', default="1.0E-15", help="set p-value for clans.input, default: 1.0E-15")
parser.add_argument('--output', help="input file for clans, e.g., clans.input", default="clans.input")
parser.add_argument('--output-pmatrix', action='store_true', help="p value matrix will be saved, see --output-matrix-fn to define name")
parser.add_argument('--output-pmatrix-fn', default="pmatrix.txt", help="filename of output matrix, pmatrix.txt by default")
parser.add_argument('--multiprocessing', action='store_true', help="run calculations in parallel way, warning: extra libraries required, see the Docs")
return parser
#main
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
debug = args.debug # as a short cut later
f = open(args.matrixfn)
# OK, check if there is a header = the line with '#'
headers = f.readline()
if headers.strip().startswith('#'):
# if yes, then split remove # and split into lists
ids = headers.replace('#', '').split()
for i in ids:
print(i)
else:
# if no, then make a list form [0, # of items in the first line]
ids = [str(i) for i in range(0, len(headers.split()))]
f.seek(0)
# get max
logging.info(time.strftime("%Y-%m-%d %H:%M:%S"))
# Collect rmsds into a list
rmsds = []
for l in f:
#for rmsd in map(float,l.split()): # map(float, s.split())
rmsds.append(map(float,l.split()))
# warning:
# eh, this matrix is not really used by clanstix main engine
# it's used to calc min and max value of a matrix
matrix = np.loadtxt(args.matrixfn)
if check_symmetric(matrix):
if args.debug: print('Matrix is symmetrical! ', matrix.shape)
else:
raise Exception('Matrix is not symmetrical! Check your matrix', matrix.shape)
# keep dot quite big by default,
# but if you use dotsize then keep this one 0
dotsize = 8
if args.groups_auto or args.groups:
dotsize = 0
c = RNAStructClans(n=len(ids), dotsize=dotsize) # 200?
c.add_ids(ids)
if debug:
print('dist_from_matrix...')
if args.multiprocessing:
import multiprocessing as mp
#import dill
#import parmap
from pathos.multiprocessing import ProcessingPool
matrix = np.loadtxt(args.matrixfn)
max = int(math.floor(matrix.max()))
min = matrix[matrix>0].min()
clans_list_of_pvalues = ''
pool = ProcessingPool(mp.cpu_count())
x=pool.map(c.dist_from_matrix_mp, [args.output_pmatrix_fn], [max], [min], [rmsds], [args.output_pmatrix], )
pool.close()
for i in x:
            clans_list_of_pvalues += i
else:
c.dist_from_matrix(rmsds, matrix, args.use_pvalue, args.use_input_values, args.dont_calc, args.debug)
if debug:
print('process the matrix...')
#
# DEFINE GROUPS
#
# 1+20+20+20+20+20
seqgroups = ''
#colors = ['0;255;102;255', # ligthgreen 1
# '0;102;0;255', # forest 2
# A list of colors used later ....
colors = [
'255;102;102;255', # red 3
'51;51;255;255', # blue 4
'0;255;255;255', # light blue +1
'180;38;223;255', # violet 5
'64;64;64;255', # grey 6
'255;128;0;255', # orange 7
'240;230;140;255', #khaki
'210;105;30;255', # chocolate
        '0;255;255;255', # cyan
'128;0;128;255', # purple 8
'0;128;128;255', # Teal 9
'128;0;0;255', # maroon 10
]
# This is pretty much the same list as above, but in here I have more distinguishable colors,
    # as it should be used with a smaller number of groups
colors_homologs = [
'255;102;102;255', # red 3
'51;51;255;255', # blue 4
'64;64;64;255', # grey 6
'128;0;128;255', # purple 8
'128;0;0;255', # maroon 10
        '0;255;255;255', # cyan
'237;41;57;255', # red
#'210;105;30;255', # chocolate
]
# OK, this is the trick
# if args.groups_auto is on, then you built args.groups automatically,
    # and then just simply run the next if as if it were args.groups in the arguments
# pretty cool ;-)
if args.groups_auto:
# arsg_groups = ''
from collections import OrderedDict
# collect groups
#groups = []
groups = OrderedDict()
for i in ids:
#
# collect homologs gmp_
# simrna and farna
print(i)
group_name = i[:args.groups_auto]
if group_name in groups:
groups[group_name] += 1
else:
groups[group_name] = 1
groups_str = ''
for g in groups:
groups_str += str(groups[g]) + ':' + g + '+'
#
# this is the trick, you
args.groups = groups_str[:-1]
print(groups_str)
# change this to get 1:hccp+10:zmp+10:xrt
if args.groups:
# this is a manual way how to do it
groups = args.groups.split('+')
seqgroups = '<seqgroups>\n'
curr_number = 0
homologs_colors = {}
if args.debug: print(args.groups)
for index, group in enumerate(groups):
# parse groups
# type and size will be changed for native
size = args.groups_dot_size
dottype = 0
color = '' # use for diff color selection if tar
if ':' in group:
nstruc, name = group.split(':')
if name == 'native':
dottype = 8
size = 20
color = '0;128;128;255' # olive
if 'solution' in name:
dottype = 8
size = 30 # solution is bigger
color = '0;128;128;255' # olive
if args.one_target: # target is target, dont distinguish it
if 'tar' in name and 'tar_min' not in name: # SimRNA
dottype = 7
size = size # 7 # size of SimRNA reference seq points
color = '0;255;102;255' # ligthgreen 1
if 'tar_min' in name:
dottype = 9
size = size # 7 # size of Rosetta reference seq points
color = '0;102;0;255' # forest 2
else:
if 'tar' in name: # SimRNA
size = size #8 # size of SimRNA reference seq points
dottype = 0
color = '0;255;102;255' # ligthgreen 1
if args.shape_by_source:
# Rosetta models are diamond now
if 'min' in name:
dottype = 2
# color by homolog
if args.color_by_homolog:
# 10:gxx_6bd266+10:gxx_min.ou+10:gbaP_d2b57+10:gbaP_min.o+10:gbx_00de79+10:gbx_min.ou+10:gapP_d9d22+10:gapP_min.o+10:gmp_faa97e+10:gmp_min.ou
tmp = name.split('_')
homolog_name = tmp[0]
                if debug: print('homolog_name', homolog_name)
# ignore tar and solution, their colors are defined above ^
## [mm] simrna5x100farna5x100$ git:(master) ✗ rna_clastix.py --groups-auto 8 --color-by-homolog --shape-by-source rp14sub_ref_mapping_refX.txt input2.clans --debug
## 2019-01-09 12:23:21
## 100:tar_min.+100:tar_rp14+1:solution+100:cy2_min.+100:cy2_r14a+100:aj6_min.+100:aj6_r14a
## {'cy2': '210;105;30;255'}
## {'cy2': '210;105;30;255'}
## {'cy2': '210;105;30;255', 'aj6': '0;255;255;255'}
## {'cy2': '210;105;30;255', 'aj6': '0;255;255;255'}
## # max: 18.000000 min (non-zero): 0.810000
if homolog_name == 'tar' or homolog_name.startswith('solution'):
pass
else:
if homolog_name in homologs_colors:
color = homologs_colors[homolog_name]
else:
homologs_colors[homolog_name] = colors_homologs.pop()
color = homologs_colors[homolog_name]
if debug: print(homologs_colors)
else:
nstruc = group
name = 'foo'
# color is set in the args.color_by_homologs
# craft seqgroups
seqgroups += '\n'
seqgroups += "name=%s\n" % name
# color hack
if color: # this color is fixed for native right now
seqgroups += "color=%s\n" % color
else:
# if beyond index, use different shape
#try:
#print(index, len(colors))
if index >= len(colors):
index = index - len(colors) # reset color index
dottype = 6 # set dottype when the colors are done
seqgroups += "color=%s\n" % colors[index]
seqgroups += "type=%i\n" % dottype
# color hack
seqgroups += "size=%i\n" % size
seqgroups += "hide=0\n"
if debug:
print('name: ' + name + ' ' + colors[index])
# get numbers - convert nstruc into numbers in Clans format 0;1; etc.
# remember: it starts from 0
# --groups 1:hccp+10:zmp+10:xrt
# hccp
# [0]
# zmp
# [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# xrt
# [11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
numbers = range(curr_number, curr_number + int(nstruc)) # 0, 1
curr_number = curr_number + int(nstruc)
seqgroups += "numbers=%s;\n" % ';'.join([str(number) for number in numbers])
seqgroups += '</seqgroups>\n'
with open(args.output, 'w') as f:
f.write(c.txt)
if args.multiprocessing:
f.write(clans_list_of_pvalues)
f.write(seqgroups)
f.write(c.comment)
# if debug: print(c.txt)
print(c.comment)
logging.info(time.strftime("%Y-%m-%d %H:%M:%S"))
if debug:
print(seqgroups)
|
mmagnus/rna-pdb-tools
|
rna_tools/tools/clanstix/rna_clanstix.py
|
Python
|
gpl-3.0
| 21,529
|
[
"BLAST"
] |
6d5e250d8eb968fba1a4ac92fec858f34c66ed19c0fec1027542c65837b9ae58
|
#!/usr/bin/env python
"""
Functions that use FreeSurfer commands.
Authors:
- Arno Klein, 2012-2013 (arno@mindboggle.info) http://binarybottle.com
Copyright 2013, Mindboggle team (http://mindboggle.info), Apache v2.0 License
"""
def label_with_classifier(subject, hemi, left_classifier='',
right_classifier='', annot_file='',
subjects_directory=''):
"""
Label a brain with the DKT atlas using FreeSurfer's mris_ca_label
FreeSurfer documentation ::
SYNOPSIS
mris_ca_label [options] <subject> <hemi> <surf> <classifier> <output>
DESCRIPTION
For a single subject, produces an annotation file, in which each
cortical surface vertex is assigned a neuroanatomical label.
This automatic procedure employs data from a previously-prepared atlas
file. An atlas file is created from a training set, capturing region
data manually drawn by neuroanatomists combined with statistics on
variability correlated to geometric information derived from the
cortical model (sulcus and curvature). Besides the atlases provided
with FreeSurfer, new ones can be prepared using mris_ca_train).
Notes regarding creation and use of FreeSurfer Gaussian classifier atlas:
Create the DKT classifier atlas (?h.DKTatlas40.gcs) --NEED TO VERIFY THIS:
$ mris_ca_train -t $FREESURFER_HOME/average/colortable_desikan_killiany.txt \
$hemi sphere.reg aparcNMMjt.annot $SCANS ./$hemi.DKTatlas40.gcs
Label a brain with the DKT atlas (surface annotation file ?h.DKTatlas40.annot):
$ mris_ca_label -l ./$x/label/$hemi.cortex.label $x/ $hemi sphere.reg \
./$hemi.DKTatlas40.gcs ./$x/label/$hemi.DKTatlas40.annot
Label the cortex of a subject's segmented volume according
to the edited surface labels (?h.aparcNMMjt.annot):
$ mri_aparc2aseg --s ./x --volmask --annot aparcNMMjt
Label a brain with the DKT atlas using FreeSurfer's mris_ca_label:
$ mris_ca_label MMRR-21-1 lh lh.sphere.reg ../lh.DKTatlas40.gcs ../out.annot
Parameters
----------
subject : string
subject corresponding to FreeSurfer subject directory
hemi : string
hemisphere ['lh' or 'rh']
left_classifier : string
name of left hemisphere FreeSurfer classifier atlas (full path)
right_classifier : string
name of right hemisphere FreeSurfer classifier atlas (full path)
annot_file : string
name of output .annot file
subjects_directory : string
FreeSurfer subjects directory (mris_ca_label -sdir option)
Returns
-------
annot_file : string
name of output .annot file
Examples
--------
>>> # This example requires a FreeSurfer subjects/<subject> subdirectory
>>> import os
>>> from mindboggle.utils.freesurfer import label_with_classifier
>>> subject = 'Twins-2-1'
>>> hemi = 'lh'
>>> left_classifier = '/homedir/mindboggle_cache/b28a600a713c269f4c561f66f64337b2/lh.DKTatlas40.gcs'
>>> right_classifier = ''
>>> annot_file = './lh.classifier.annot'
>>> subjects_directory = ''
>>> label_with_classifier(subject, hemi, left_classifier, right_classifier, annot_file, subjects_directory)
>>> #
>>> # View:
>>> from mindboggle.utils.freesurfer import annot_to_vtk
>>> from mindboggle.utils.plots import plot_surfaces
>>> path = os.environ['MINDBOGGLE_DATA']
>>> vtk_file = os.path.join(path, 'arno', 'freesurfer', 'lh.pial.vtk')
>>> output_vtk = './lh.classifier.vtk'
>>> #
>>> labels, output_vtk = annot_to_vtk(annot_file, vtk_file, output_vtk)
>>> plot_surfaces(output_vtk)
"""
import os
from mindboggle.utils.utils import execute
if not annot_file:
annot_file = os.path.join(os.getcwd(), hemi + '.classifier.annot')
if hemi == 'lh':
classifier = left_classifier
elif hemi == 'rh':
classifier = right_classifier
else:
print("label_with_classifier()'s hemi should be 'lh' or 'rh'")
if subjects_directory:
sdir = ' -sdir ' + subjects_directory
else:
sdir = ''
cmd = ['mris_ca_label', subject, hemi, hemi+'.sphere.reg', classifier,
annot_file, sdir]
execute(cmd)
if not os.path.exists(annot_file):
raise(IOError("mris_ca_label did not create " + annot_file + "."))
return annot_file
# def convert_mgh_to_native_nifti_mri_vol2vol(input_file, reference_file,
# output_file='', interp='nearest'):
# """
# Convert volume from FreeSurfer 'unconformed' to original space
# in nifti format using FreeSurfer's mri_vol2vol.
#
# Note: FreeSurfer's mri_convert example results in type: SHORT (4),
# while mri_vol2vol results in type: FLOAT (3), as does scipy.ndimage.
# The mri_vol2vol command is ::
#
# mri_vol2vol --mov <input_file> --targ <reference_file>
# --interp trilin --regheader --o <output_file>
#
# Parameters
# ----------
# input_file : string
# input file name
# reference_file : string
# file in original space
# output_file : string
# name of output file
# interp : string
# interpolation method {trilin, nearest}
#
# Returns
# -------
# output_file : string
# name of output file
#
# """
# import os
#
# from mindboggle.utils.utils import execute
#
# # Convert volume from FreeSurfer to original space:
# print("Convert volume from FreeSurfer 'unconformed' to original space...")
#
# if not os.path.exists(input_file):
# raise(IOError("Input file " + input_file + " not found"))
# if not os.path.exists(reference_file):
# raise(IOError("Reference file " + reference_file + " not found."))
# if not output_file:
# output_file = os.path.join(os.getcwd(),
# os.path.basename(input_file).split('mgz')[0] + 'nii.gz')
#
# cmd = ['mri_vol2vol',
# '--mov', input_file,
# '--targ', reference_file,
# '--interp', interp,
# '--regheader --o', output_file]
# execute(cmd)
# if not os.path.exists(output_file):
# raise(IOError("mri_vol2vol did not create " + output_file + "."))
#
# return output_file
#
#
# def annot_labels_to_volume(subject, annot_name, original_space, reference):
# """
# Propagate surface labels through hemisphere's gray matter volume
# using FreeSurfer's mri_aparc2aseg.
#
# Note ::
# From the mri_aparc2aseg documentation:
# The volumes of the cortical labels will be different than
# reported by mris_anatomical_stats because partial volume information
# is lost when mapping the surface to the volume. The values reported by
# mris_anatomical_stats will be more accurate than the volumes from the
# aparc+aseg volume.
#
# Parameters
# ----------
# subject : string
# subject name
# annot_name: string
# FreeSurfer annot filename without the hemisphere prepend or .annot append
# original_space: Boolean
# convert from FreeSurfer unconformed to original space?
# reference : string
# file in original space
#
# Returns
# -------
# output_file : string
# name of output file
#
# """
# import os
#
# from mindboggle.utils.freesurfer import convert_mgh_to_native_nifti
# from mindboggle.utils.utils import execute
#
# # Fill hemisphere gray matter volume with surface labels using FreeSurfer:
# print("Fill gray matter volume with surface labels using FreeSurfer...")
#
# output_file1 = os.path.join(os.getcwd(), annot_name + '.nii.gz')
#
# cmd = ['mri_aparc2aseg', '--s', subject, '--annot', annot_name,
# '--o', output_file1]
# execute(cmd)
# if not os.path.exists(output_file1):
# raise(IOError("mri_aparc2aseg did not create " + output_file1 + "."))
#
# # Convert label volume from FreeSurfer to original space:
# if original_space:
#
# output_file2 = os.path.join(os.getcwd(), annot_name + '.native.nii.gz')
# output_file = convert_mgh_to_native_nifti(output_file1, reference,
# output_file2, interp='nearest')
# else:
# output_file = output_file1
#
# if not os.path.exists(output_file):
# raise(IOError("Output file " + output_file + " not created."))
#
# return output_file
#
#
#def register_template(hemi, sphere_file, transform,
# templates_path, template):
# """
# Register surface to template with FreeSurfer's mris_register.
#
# Transform the labels from multiple atlases via a template
# (using FreeSurfer's mris_register).
#
# """
# import os
#
# from mindboggle.utils.utils import execute
#
# template_file = os.path.join(templates_path, hemi + '.' + template)
# output_file = hemi + '.' + transform
#
# cmd = ['mris_register', '-curv', sphere_file, template_file, output_file]
# execute(cmd)
# if not os.path.exists(output_file):
# raise(IOError(output_file + " not found"))
#
# return transform
#
#
#def transform_atlas_labels(hemi, subject, transform,
# subjects_path, atlas, atlas_string):
# """
# Transform atlas labels.
#
# Read in the FreeSurfer *.annot file for a subject's brain hemisphere.
#
# Transform the labels from a surface atlas via a template
# using FreeSurfer's mri_surf2surf (wrapped in Nipype).
#
# nipype.workflows.smri.freesurfer.utils.fs.SurfaceTransform
# wraps command ``mri_surf2surf`` ::
#
# "Transform a surface file from one subject to another via a spherical
# registration. Both the source and target subject must reside in your
# Subjects Directory, and they must have been processed with recon-all,
# unless you are transforming to one of the icosahedron meshes."
#
# Parameters
# ----------
# hemi : string
# hemisphere ['lh' or 'rh']
# subject : string
# subject corresponding to FreeSurfer subject directory
# transform : string
# name of FreeSurfer spherical surface registration transform file
# subjects_path : string
# name of FreeSurfer subjects directory
# atlas : string
# name of atlas
# atlas_string : string
# name of atlas labeling protocol
#
# Returns
# -------
# output_file : string
# name of the output file
#
# """
# import os
# from nipype.interfaces.freesurfer import SurfaceTransform
#
# sxfm = SurfaceTransform()
# sxfm.inputs.hemi = hemi
# sxfm.inputs.target_subject = subject
# sxfm.inputs.source_subject = atlas
#
# # Source file
# sxfm.inputs.source_annot_file = os.path.join(subjects_path,
# atlas, 'label',
# hemi + '.' + atlas_string + '.annot')
# # Output annotation file
# output_file = os.path.join(os.getcwd(), hemi + '.' + atlas + '.' +
# atlas_string + '_to_' + subject + '.annot')
# sxfm.inputs.out_file = output_file
#
# # Arguments: strings within registered files
# args = ['--srcsurfreg', transform,
# '--trgsurfreg', transform]
# sxfm.inputs.args = ' '.join(args)
#
# sxfm.run()
#
# if not os.path.exists(output_file):
# raise(IOError(output_file + " not found"))
#
# return output_file
#
#
#def vote_labels(label_lists):
# """
# For each vertex, vote on the majority label.
#
# Parameters
# ----------
# label_lists : list of lists of integers
# vertex labels assigned by each atlas
#
# Returns
# -------
# labels_max : list of integers
# majority labels for vertices
# label_counts : list of integers
# number of different labels for vertices
# label_votes : list of integers
# number of votes for the majority labels
#
# Examples
# --------
# >>> from collections import Counter
# >>> X = [1,1,2,3,4,2,1,2,1,2,1,2]
# >>> Votes = Counter(X)
# >>> Votes
# Counter({1: 5, 2: 5, 3: 1, 4: 1})
# >>> Votes.most_common(1)
# [(1, 5)]
# >>> Votes.most_common(2)
# [(1, 5), (2, 5)]
# >>> len(Votes)
# 4
#
# """
# from collections import Counter
#
# print("Begin voting...")
# n_atlases = len(label_lists) # number of atlases used to label subject
# npoints = len(label_lists[0])
# labels_max = [-1 for i in range(npoints)]
# label_counts = [1 for i in range(npoints)]
# label_votes = [n_atlases for i in range(npoints)]
#
# consensus_vertices = []
# for vertex in range(npoints):
# votes = Counter([label_lists[i][vertex] for i in range(n_atlases)])
#
# labels_max[vertex] = votes.most_common(1)[0][0]
# label_votes[vertex] = votes.most_common(1)[0][1]
# label_counts[vertex] = len(votes)
# if len(votes) == n_atlases:
# consensus_vertices.append(vertex)
#
# print("Voting done.")
#
# return labels_max, label_votes, label_counts, consensus_vertices
#
#
#def majority_vote_label(surface_file, annot_files):
# """
# Load a VTK surface and corresponding FreeSurfer annot files.
# Write majority vote labels, and label counts and votes as VTK files.
#
# Parameters
# ----------
# surface_file : string
# name of VTK surface file
# annot_files : list of strings
# names of FreeSurfer annot files
#
# Returns
# -------
# labels_max : list of integers
# majority labels for vertices
# label_counts : list of integers
# number of different labels for vertices
# label_votes : list of integers
# number of votes for the majority labels
# consensus_vertices : list of integers
# indicating which are consensus labels
# maxlabel_file : string
# name of VTK file containing majority vote labels
# labelcounts_file : string
# name of VTK file containing number of different label counts
# labelvotes_file : string
# name of VTK file containing number of votes per majority label
#
# """
# import os
# from os import path, getcwd
# import nibabel as nb
# import pyvtk
# from mindboggle.utils.freesurfer import vote_labels
# from mindboggle.utils.io_table import string_vs_list_check
#
# # Load multiple label sets
# print("Load annotation files...")
# label_lists = []
# for annot_file in annot_files:
# labels, colortable, names = nb.freesurfer.read_annot(annot_file)
# label_lists.append(labels)
# print("Annotations loaded.")
#
# # Vote on labels for each vertex
# labels_max, label_votes, label_counts, \
# consensus_vertices = vote_labels(label_lists)
#
# # Check type to make sure the filename is a string
# # (if a list, return the first element)
# surface_file = string_vs_list_check(surface_file)
#
# # Save files
# VTKReader = pyvtk.VtkData(surface_file)
# Vertices = VTKReader.structure.points
# Faces = VTKReader.structure.polygons
#
# output_stem = path.join(getcwd(), path.basename(surface_file.strip('.vtk')))
# maxlabel_file = output_stem + '.labels.max.vtk'
# labelcounts_file = output_stem + '.labelcounts.vtk'
# labelvotes_file = output_stem + '.labelvotes.vtk'
#
# pyvtk.VtkData(pyvtk.PolyData(points=Vertices, polygons=Faces),\
# pyvtk.PointData(pyvtk.Scalars(labels_max,\
# name='Max (majority labels)'))).\
# tofile(maxlabel_file, 'ascii')
#
# pyvtk.VtkData(pyvtk.PolyData(points=Vertices, polygons=Faces),\
# pyvtk.PointData(pyvtk.Scalars(label_counts,\
# name='Counts (number of different labels)'))).\
# tofile(labelcounts_file, 'ascii')
#
# pyvtk.VtkData(pyvtk.PolyData(points=Vertices, polygons=Faces),\
# pyvtk.PointData(pyvtk.Scalars(label_votes,\
# name='Votes (number of votes for majority labels)'))).\
# tofile(labelvotes_file, 'ascii')
#
# if not os.path.exists(maxlabel_file):
# raise(IOError(maxlabel_file + " not found"))
# if not os.path.exists(labelcounts_file):
# raise(IOError(labelcounts_file + " not found"))
# if not os.path.exists(labelvotes_file):
# raise(IOError(labelvotes_file + " not found"))
#
# return labels_max, label_counts, label_votes, consensus_vertices, \
# maxlabel_file, labelcounts_file, labelvotes_file
#def relabel_annot_file(hemi, subject, annot_name, new_annot_name, relabel_file):
# """
# Combine surface labels in a .annot file.
#
# https://mail.nmr.mgh.harvard.edu/pipermail//freesurfer/2010-June/014620.html
#
# `mris_translate_annotation <subject> <hemi> <in annot> <translation file> <out annot>`
#
# ``translation file``: text file that lists the labels (one per line)
# you want to group, and the new label you want to create. You have to use
# the RGB codes; each line will provide the input and output RGB values::
#
# 221 220 60 223 220 60
# 221 220 160 223 220 60
# 221 220 100 223 220 60
#
# Parameters
# ----------
# hemi : string
# hemisphere ['lh' or 'rh']
# subject : string
# subject name
# annot_name : string
# name of .annot file (without pre- or post-pends)
# relabel_file : string
# text file with old and new RGB values
# new_annot_name : string
# new .annot name
#
# Returns
# -------
# new_annot_name : string
# new .annot name
#
# """
# from nipype.interfaces.base import CommandLine
#
# cli = CommandLine(command='mris_translate_annotation')
# cli.inputs.args = ' '.join([subject, hemi, annot_name, relabel_file,
# new_annot_name])
# cli.cmdline
# cli.run()
#
# return new_annot_name
#def thickness_to_ascii(hemi, subject, subjects_path):
# """
# Convert a FreeSurfer thickness (per-vertex) file
# to an ascii file.
#
# Note: Untested function
#
# Parameters
# ----------
# hemi : string indicating left or right hemisphere
# subject_path: string
# path to subject directory where the binary FreeSurfer
# thickness file is found ("lh.thickness")
#
# Returns
# -------
# thickness_file : string
# name of output file, where each element is the thickness
# value of a FreeSurfer mesh vertex. Elements are ordered
# by orders of vertices in FreeSurfer surface file.
#
# """
# import os
# from nipype.interfaces.base import CommandLine
#
# filename = hemi + 'thickness'
# filename_full = os.path.join(subjects_path, subject, filename)
# thickness_file = os.path.join(os.getcwd(), filename + '.dat')
#
# cli = CommandLine(command='mri_convert')
# cli.inputs.args = ' '.join([filename_full, '--ascii+crsf', thickness_file])
# cli.cmdline
# cli.run()
#
# return thickness_file
#def vtk_to_labels(hemi, surface_file, label_numbers, label_names,
# RGBs, scalar_name):
# """
# Write FreeSurfer .label files from a labeled VTK surface mesh.
#
# From https://surfer.nmr.mgh.harvard.edu/fswiki/LabelsClutsAnnotationFiles:
#
# "A label file is a text file capturing a list of vertices belonging to a region,
# including their spatial positions(using R,A,S coordinates). A label file
# corresponds only to a single label, thus contains only a single list of vertices"::
#
# 1806
# 7 -22.796 -66.405 -29.582 0.000000
# 89 -22.273 -43.118 -24.069 0.000000
# 138 -14.142 -81.495 -30.903 0.000000
# [...]
#
# Parameters
# ----------
# hemi : string
# hemisphere
# surface_file : string
# vtk surface mesh file with labels
# label_numbers : list of integers
# label numbers
# label_names : list of strings
# label names
# RGBs : list of lists of 3-tuples
# label RGB values for later conversion to a .annot file
# scalar_name : string
# name of scalar values in vtk file
#
# Returns
# -------
# label_files : list of strings
# label file names (order must match label list)
# colortable : string
# file with list of labels and RGB values
# NOTE: labels are identified by the colortable's RGB values
#
# """
# import os
# import numpy as np
# import vtk
#
# def string_vs_list_check(var):
# """
# Check type to make sure it is a string.
#
# (if a list, return the first element)
# """
#
# # Check type:
# # NOTE: change to: type(var).__name__
# if type(var) == str:
# return var
# elif type(var) == list:
# return var[0]
# else:
# os.error("Check format of " + var)
#
# # Check type to make sure the filename is a string
# # (if a list, return the first element)
# surface_file = string_vs_list_check(surface_file)
#
# # Initialize list of label files and output colortable file
# label_files = []
# #relabel_file = os.path.join(os.getcwd(), 'relabel_annot.txt')
# #f_relabel = open(relabel_file, 'w')
# colortable = os.path.join(os.getcwd(), 'colortable.ctab')
# f_rgb = open(colortable, 'w')
#
# # Loop through labels
# irgb = 0
# for ilabel, label_number in enumerate(label_numbers):
#
# # Check type to make sure the number is an int
# label_number = int(label_number)
# label_name = label_names[ilabel]
#
# # Load surface
# reader = vtk.vtkDataSetReader()
# reader.SetFileName(surface_file)
# reader.ReadAllScalarsOn()
# reader.Update()
# data = reader.GetOutput()
# d = data.GetPointData()
# labels = d.GetArray(scalar_name)
#
# # Write vertex index, coordinates, and 0
# count = 0
# npoints = data.GetNumberOfPoints()
# L = np.zeros((npoints,5))
# for i in range(npoints):
# label = labels.GetValue(i)
# if label == label_number:
# L[count,0] = i
# L[count,1:4] = data.GetPoint(i)
# count += 1
#
# # Save the label file
# if count > 0:
# irgb += 1
#
# # Write to relabel_file
# #if irgb != label_number:
# # f_relabel.writelines('{0} {1}\n'.format(irgb, label_number))
#
# # Write to colortable
# f_rgb.writelines('{0} {1} {2}\n'.format(
# irgb, label_name, "0 0 0 0")) # ".join(RGBs[ilabel])))
#
# # Store in list of .label files
# label_file = hemi + '.' + label_name + '.label'
# label_file = os.path.join(os.getcwd(), label_file)
# label_files.append(label_file)
#
# # Write to .label file
# f = open(label_file, 'w')
# f.writelines('#!ascii label\n' + str(count) + '\n')
# for i in range(npoints):
# if any(L[i,:]):
# pr = '{0} {1} {2} {3} 0\n'.format(
# np.int(L[i,0]), L[i,1], L[i,2], L[i,3])
# f.writelines(pr)
# else:
# break
# f.close()
# f_rgb.close()
# #f_relabel.close()
#
# return label_files, colortable
#def labels_to_annot(hemi, subjects_path, subject, label_files,
# colortable, annot_name):
# """
# Convert FreeSurfer .label files to a FreeSurfer .annot file
# using FreeSurfer's mris_label2annot:
# https://surfer.nmr.mgh.harvard.edu/fswiki/mris_label2annot
#
# The order of the .label files must equal the order
# of the labels in the colortable:
# https://surfer.nmr.mgh.harvard.edu/fswiki/LabelsClutsAnnotationFiles
#
# NOTE: The resulting .annot file will have incorrect labels
# if the numbering of the labels is not sequential from 1,2,3...
# For programs like tksurfer, the labels are identified
# by the colortable's RGB values, so to some programs that display
# the label names, the labels could appear correct when not.
# NOTE: You cannot overwrite a .annot file of the same name,
# so in this script I delete it before running.
#
# Parameters
# ----------
# hemi : hemisphere [string]
# subjects_path : path to file
# subject : subject name
# label_files : .label file names [list of strings]
# colortable : file of label numbers & names (same order as label_files)
# annot_name : name of the output .annot file (without prepending hemi)
#
# Returns
# -------
# annot_name : name of .annot file (without prepend)
# annot_file : name of .annot file (with prepend)
#
# """
# import os
# from nipype.interfaces.base import CommandLine
#
# label_files = [f for f in label_files if f!=None]
# if label_files:
# annot_file = hemi + '.' + annot_name + '.annot'
# if os.path.exists(os.path.join(subjects_path, subject, 'label', annot_file)):
# cli = CommandLine(command='rm')
# cli.inputs.args = os.path.join(subjects_path, subject, \
# 'label', annot_file)
# cli.cmdline
# cli.run()
# cli = CommandLine(command='mris_label2annot')
# cli.inputs.args = ' '.join(['--h', hemi, '--s', subject, \
# '--l', ' --l '.join(label_files), \
# '--ctab', colortable, \
# '--a', annot_name])
# cli.cmdline
# cli.run()
#
# return annot_name, annot_file
|
binarybottle/mindboggle_sidelined
|
freesurfer.py
|
Python
|
apache-2.0
| 26,053
|
[
"Gaussian",
"VTK"
] |
7356e034788d73f0e8c7a98177681e34f1ae812db6fcc8862ec1c12650612185
|
# -*- coding: utf-8 -*-
"""
Kay framework.
:Copyright: (c) 2009 Accense Technology, Inc.
Takashi Matsuo <tmatsuo@candit.jp>,
Ian Lewis <IanMLewis@gmail.com>
All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import logging
import settings
__version__ = "1.0.0"
KAY_DIR = os.path.abspath(os.path.dirname(__file__))
LIB_DIR = os.path.join(KAY_DIR, 'lib')
PROJECT_DIR = os.path.abspath(os.path.dirname(settings.__file__))
PROJECT_LIB_DIR = os.path.join(PROJECT_DIR, 'lib')
def setup_env(manage_py_env=False):
"""Configures app engine environment for command-line apps."""
# Try to import the appengine code from the system path.
try:
from google.appengine.api import apiproxy_stub_map
except ImportError, e:
# Not on the system path. Build a list of alternative paths where it
# may be. First look within the project for a local copy, then look for
# where the Mac OS SDK installs it.
paths = [os.path.join(PROJECT_DIR, '.google_appengine'),
'/usr/local/google_appengine']
for path in os.environ.get('PATH', '').replace(';', ':').split(':'):
path = path.rstrip(os.sep)
if path.endswith('google_appengine'):
paths.append(path)
if os.name in ('nt', 'dos'):
prefix = '%(PROGRAMFILES)s' % os.environ
paths.append(prefix + r'\Google\google_appengine')
# Loop through all possible paths and look for the SDK dir.
SDK_PATH = None
for sdk_path in paths:
sdk_path = os.path.realpath(sdk_path)
if os.path.exists(sdk_path):
SDK_PATH = sdk_path
break
if SDK_PATH is None:
# The SDK could not be found in any known location.
sys.stderr.write('The Google App Engine SDK could not be found!\n'
'Please visit http://kay-docs.shehas.net/'
' for installation instructions.\n')
sys.exit(1)
# Add the SDK and the libraries within it to the system path.
SDK_PATH = os.path.realpath(SDK_PATH)
# if SDK_PATH points to a file, it could be a zip file.
if os.path.isfile(SDK_PATH):
import zipfile
gae_zip = zipfile.ZipFile(SDK_PATH)
lib_prefix = os.path.join('google_appengine', 'lib')
lib = os.path.join(SDK_PATH, lib_prefix)
pkg_names = []
# add all packages archived under lib in SDK_PATH zip.
for filename in sorted(e.filename for e in gae_zip.filelist):
# package should have __init__.py
if (filename.startswith(lib_prefix) and
filename.endswith('__init__.py')):
pkg_path = filename.replace(os.sep+'__init__.py', '')
# True package root should have __init__.py in upper directory,
# thus we can treat only the shortest unique path as package root.
for pkg_name in pkg_names:
if pkg_path.startswith(pkg_name):
break
else:
pkg_names.append(pkg_path)
# insert populated EXTRA_PATHS into sys.path.
EXTRA_PATHS = ([os.path.dirname(os.path.join(SDK_PATH, pkg_name))
for pkg_name in pkg_names]
+ [os.path.join(SDK_PATH, 'google_appengine')])
sys.path = EXTRA_PATHS + sys.path
      # tweak dev_appserver so that zipimport and templates work well.
from google.appengine.tools import dev_appserver
# make GAE SDK to grant opening library zip.
dev_appserver.FakeFile.ALLOWED_FILES.add(SDK_PATH)
template_dir = 'google_appengine/templates/'
dev_appserver.ApplicationLoggingHandler.InitializeTemplates(
gae_zip.read(template_dir+dev_appserver.HEADER_TEMPLATE),
gae_zip.read(template_dir+dev_appserver.SCRIPT_TEMPLATE),
gae_zip.read(template_dir+dev_appserver.MIDDLE_TEMPLATE),
gae_zip.read(template_dir+dev_appserver.FOOTER_TEMPLATE))
# ... else it could be a directory.
else:
EXTRA_PATHS = [SDK_PATH]
lib = os.path.join(SDK_PATH, 'lib')
# Automatically add all packages in the SDK's lib folder:
for dir in os.listdir(lib):
path = os.path.join(lib, dir)
# Package can be under 'lib/<pkg>/<pkg>/' or 'lib/<pkg>/lib/<pkg>/'
detect = (os.path.join(path, dir), os.path.join(path, 'lib', dir))
for path in detect:
if os.path.isdir(path):
EXTRA_PATHS.append(os.path.dirname(path))
break
sys.path = EXTRA_PATHS + sys.path
# corresponds with another google package
if sys.modules.has_key('google'):
del sys.modules['google']
from google.appengine.api import apiproxy_stub_map
setup()
if not manage_py_env:
return
print 'Running on Kay-%s' % __version__
def setup():
setup_syspath()
from kay.conf import settings
from google.appengine.ext import db
from google.appengine.ext.db import polymodel
class _meta(object):
__slots__ = ('object_name', 'app_label', 'module_name', '_db_table',
'abstract')
def __init__(self, model):
try:
self.app_label = model.__module__.split('.')[-2]
except IndexError:
logging.warn('Kay expects models (here: %s.%s) to be defined in their'
' own apps!' % (model.__module__, model.__name__))
self.app_label = None
self.module_name = model.__name__.lower()
self.abstract = model is db.Model
self.object_name = model.__name__
def _set_db_table(self, db_table):
self._db_table = db_table
def _get_db_table(self):
if getattr(settings, 'ADD_APP_PREFIX_TO_KIND', True):
if hasattr(self, '_db_table'):
return self._db_table
return '%s_%s' % (self.app_label, self.module_name)
return self.object_name
db_table = property(_get_db_table, _set_db_table)
def _initialize_model(cls):
cls._meta = _meta(cls)
old_propertied_class_init = db.PropertiedClass.__init__
def __init__(cls, name, bases, attrs, map_kind=True):
"""
Just add _meta to db.Model.
"""
_initialize_model(cls)
old_propertied_class_init(cls, name, bases, attrs,
not cls._meta.abstract)
db.PropertiedClass.__init__ = __init__
old_poly_init = polymodel.PolymorphicClass.__init__
def __init__(cls, name, bases, attrs):
if polymodel.PolyModel not in bases:
_initialize_model(cls)
old_poly_init(cls, name, bases, attrs)
polymodel.PolymorphicClass.__init__ = __init__
@classmethod
def kind(cls):
return cls._meta.db_table
db.Model.kind = kind
def setup_syspath():
if not PROJECT_DIR in sys.path:
sys.path = [PROJECT_DIR] + sys.path
if not LIB_DIR in sys.path:
sys.path = [LIB_DIR] + sys.path
if not PROJECT_LIB_DIR in sys.path:
sys.path = [PROJECT_LIB_DIR] + sys.path
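# A minimal usage sketch (not part of the original file): a project's manage.py
# would typically bootstrap the App Engine environment before importing anything
# that depends on the SDK. Only setup_env() above is from this module; the
# surrounding script is assumed.
# >>> import kay
# >>> kay.setup_env(manage_py_env=True)  # locates the GAE SDK and patches sys.path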
|
IanLewis/kay
|
kay/__init__.py
|
Python
|
bsd-3-clause
| 6,818
|
[
"VisIt"
] |
785436d2d727b96b80434942c4b05640c03d82294e9f3ad6f7d5db48c4b28b8a
|
"""locate.py - determine architecture and find Java
python-javabridge is licensed under the BSD license. See the
accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2013 Broad Institute
All rights reserved.
"""
import ctypes
import os
import sys
import logging
import subprocess
is_linux = sys.platform.startswith('linux')
is_mac = sys.platform == 'darwin'
is_win = sys.platform.startswith("win")
is_win64 = (is_win and (os.environ["PROCESSOR_ARCHITECTURE"] == "AMD64"))
is_msvc = (is_win and sys.version_info[0] >= 2 and sys.version_info[1] >= 6)
is_mingw = (is_win and not is_msvc)
logger = logging.getLogger(__name__)
def find_javahome():
"""Find JAVA_HOME if it doesn't exist"""
if hasattr(sys, 'frozen') and is_win:
#
# The standard installation of CellProfiler for Windows comes with a JRE
#
path = os.path.split(os.path.abspath(sys.argv[0]))[0]
path = os.path.join(path, "jre")
for jvm_folder in ("client", "server"):
jvm_path = os.path.join(path, "bin", jvm_folder, "jvm.dll")
if os.path.exists(jvm_path):
# Problem: have seen JAVA_HOME != jvm_path cause DLL load problems
if os.environ.has_key("JAVA_HOME"):
del os.environ["JAVA_HOME"]
return path
if os.environ.has_key('JAVA_HOME'):
return os.environ['JAVA_HOME']
elif is_mac:
# Use the "java_home" executable to find the location
# see "man java_home"
libc = ctypes.CDLL("/usr/lib/libc.dylib")
if sys.maxsize <= 2**32:
arch = "i386"
else:
arch = "x86_64"
try:
result = subprocess.check_output(["/usr/libexec/java_home", "--arch", arch])
path = result.strip()
for place_to_look in (
os.path.join(os.path.dirname(path), "Libraries"),
os.path.join(path, "jre", "lib", "server")):
lib = os.path.join(place_to_look, "libjvm.dylib")
#
# dlopen_preflight checks to make sure libjvm.dylib
# can be loaded in the current architecture
#
if os.path.exists(lib) and \
libc.dlopen_preflight(lib) !=0:
return path
else:
logger.error("Could not find Java JRE compatible with %s architecture" % arch)
if arch == "i386":
logger.error(
"Please visit https://support.apple.com/kb/DL1572 for help\n"
"installing Apple legacy Java 1.6 for 32 bit support.")
return None
except:
logger.error("Failed to run /usr/libexec/java_home, defaulting to best guess for Java", exc_info=1)
return "/System/Library/Frameworks/JavaVM.framework/Home"
elif is_linux:
def get_out(cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
o, ignore = p.communicate()
if p.poll() != 0:
raise Exception("Error finding javahome on linux: %s" % cmd)
o = o.strip()
return o
java_bin = get_out(["bash", "-c", "type -p java"])
java_dir = get_out(["readlink", "-f", java_bin])
jdk_dir = os.path.join(java_dir, "..", "..", "..")
jdk_dir = os.path.abspath(jdk_dir)
return jdk_dir
elif is_win:
import _winreg
java_key_path = 'SOFTWARE\\JavaSoft\\Java Runtime Environment'
looking_for = java_key_path
try:
kjava = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, java_key_path)
looking_for = java_key_path + "\\CurrentVersion"
kjava_values = dict([_winreg.EnumValue(kjava, i)[:2]
for i in range(_winreg.QueryInfoKey(kjava)[1])])
current_version = kjava_values['CurrentVersion']
looking_for = java_key_path + '\\' + current_version
kjava_current = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
looking_for)
kjava_current_values = dict([_winreg.EnumValue(kjava_current, i)[:2]
for i in range(_winreg.QueryInfoKey(kjava_current)[1])])
return kjava_current_values['JavaHome']
except:
logger.error("Failed to find registry entry: %s\n" %looking_for,
exc_info=True)
return None
def find_jdk():
"""Find the JDK under Windows"""
if os.environ.has_key('JDK_HOME'):
return os.environ['JDK_HOME']
if is_mac:
return find_javahome()
if is_win:
import _winreg
import exceptions
try:
jdk_key_path = 'SOFTWARE\\JavaSoft\\Java Development Kit'
kjdk = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, jdk_key_path)
kjdk_values = dict([_winreg.EnumValue(kjdk, i)[:2]
for i in range(_winreg.QueryInfoKey(kjdk)[1])])
current_version = kjdk_values['CurrentVersion']
kjdk_current = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
jdk_key_path + '\\' + current_version)
kjdk_current_values = dict([_winreg.EnumValue(kjdk_current, i)[:2]
for i in range(_winreg.QueryInfoKey(kjdk_current)[1])])
return kjdk_current_values['JavaHome']
except exceptions.WindowsError as e:
if e.errno == 2:
raise RuntimeError(
"Failed to find the Java Development Kit. Please download and install the Oracle JDK 1.6 or later")
else:
raise
def find_javac_cmd():
"""Find the javac executable"""
if is_win:
jdk_base = find_jdk()
javac = os.path.join(jdk_base, "bin", "javac.exe")
if os.path.isfile(javac):
return javac
raise RuntimeError("Failed to find javac.exe in its usual location under the JDK (%s)" % javac)
else:
# will be along path for other platforms
return "javac"
def find_jar_cmd():
"""Find the javac executable"""
if is_win:
jdk_base = find_jdk()
javac = os.path.join(jdk_base, "bin", "jar.exe")
if os.path.isfile(javac):
return javac
raise RuntimeError("Failed to find jar.exe in its usual location under the JDK (%s)" % javac)
else:
# will be along path for other platforms
return "jar"
|
jakirkham/python-javabridge
|
javabridge/locate.py
|
Python
|
bsd-3-clause
| 6,661
|
[
"VisIt"
] |
5e3039f4875d65a2dc7fdb7856e6a822bccc88e73d629937f8d8d44b1dce8ff2
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hparams for model architecture and trainer."""
import ast
import collections
import copy
from typing import Any, Dict, Text
import six
import tensorflow as tf
import yaml
def eval_str_fn(val):
if val in {'true', 'false'}:
return val == 'true'
try:
return ast.literal_eval(val)
except (ValueError, SyntaxError):
return val
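# Illustrative behaviour of eval_str_fn (not part of the original file):
# 'true'/'false' become booleans, Python literals are evaluated, and anything
# else is returned unchanged as a string.
# >>> eval_str_fn('true'), eval_str_fn('3'), eval_str_fn('[1, 2]'), eval_str_fn('relu6')
# (True, 3, [1, 2], 'relu6')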
# pylint: disable=protected-access
class Config(object):
"""A config utility class."""
def __init__(self, config_dict=None):
self.update(config_dict)
def __setattr__(self, k, v):
self.__dict__[k] = Config(v) if isinstance(v, dict) else copy.deepcopy(v)
def __getattr__(self, k):
return self.__dict__[k]
def __getitem__(self, k):
return self.__dict__[k]
def __repr__(self):
return repr(self.as_dict())
def __deepcopy__(self, memodict):
return type(self)(self.as_dict())
def __str__(self):
try:
return yaml.dump(self.as_dict(), indent=4)
except TypeError:
return str(self.as_dict())
def _update(self, config_dict, allow_new_keys=True):
"""Recursively update internal members."""
if not config_dict:
return
for k, v in six.iteritems(config_dict):
if k not in self.__dict__:
if allow_new_keys:
self.__setattr__(k, v)
else:
raise KeyError('Key `{}` does not exist for overriding. '.format(k))
else:
if isinstance(self.__dict__[k], Config) and isinstance(v, dict):
self.__dict__[k]._update(v, allow_new_keys)
elif isinstance(self.__dict__[k], Config) and isinstance(v, Config):
self.__dict__[k]._update(v.as_dict(), allow_new_keys)
else:
self.__setattr__(k, v)
def get(self, k, default_value=None):
return self.__dict__.get(k, default_value)
def update(self, config_dict):
"""Update members while allowing new keys."""
self._update(config_dict, allow_new_keys=True)
def keys(self):
return self.__dict__.keys()
def override(self, config_dict_or_str, allow_new_keys=False):
"""Update members while disallowing new keys."""
if isinstance(config_dict_or_str, str):
if not config_dict_or_str:
return
elif '=' in config_dict_or_str:
config_dict = self.parse_from_str(config_dict_or_str)
elif config_dict_or_str.endswith('.yaml'):
config_dict = self.parse_from_yaml(config_dict_or_str)
else:
raise ValueError(
'Invalid string {}, must end with .yaml or contains "=".'.format(
config_dict_or_str))
elif isinstance(config_dict_or_str, dict):
config_dict = config_dict_or_str
else:
raise ValueError('Unknown value type: {}'.format(config_dict_or_str))
self._update(config_dict, allow_new_keys)
def parse_from_yaml(self, yaml_file_path: Text) -> Dict[Any, Any]:
"""Parses a yaml file and returns a dictionary."""
with tf.io.gfile.GFile(yaml_file_path, 'r') as f:
config_dict = yaml.load(f, Loader=yaml.FullLoader)
return config_dict
def save_to_yaml(self, yaml_file_path):
"""Write a dictionary into a yaml file."""
with tf.io.gfile.GFile(yaml_file_path, 'w') as f:
yaml.dump(self.as_dict(), f, default_flow_style=False)
def parse_from_str(self, config_str: Text) -> Dict[Any, Any]:
"""Parse a string like 'x.y=1,x.z=2' to nested dict {x: {y: 1, z: 2}}."""
if not config_str:
return {}
config_dict = {}
try:
for kv_pair in config_str.split(','):
if not kv_pair: # skip empty string
continue
key_str, value_str = kv_pair.split('=')
key_str = key_str.strip()
def add_kv_recursive(k, v):
"""Recursively parse x.y.z=tt to {x: {y: {z: tt}}}."""
if '.' not in k:
if '*' in v:
# we reserve * to split arrays.
return {k: [eval_str_fn(vv) for vv in v.split('*')]}
return {k: eval_str_fn(v)}
pos = k.index('.')
return {k[:pos]: add_kv_recursive(k[pos + 1:], v)}
def merge_dict_recursive(target, src):
"""Recursively merge two nested dictionary."""
for k in src.keys():
if ((k in target and isinstance(target[k], dict) and
isinstance(src[k], collections.Mapping))):
merge_dict_recursive(target[k], src[k])
else:
target[k] = src[k]
merge_dict_recursive(config_dict, add_kv_recursive(key_str, value_str))
return config_dict
except ValueError:
raise ValueError('Invalid config_str: {}'.format(config_str))
def as_dict(self):
"""Returns a dict representation."""
config_dict = {}
for k, v in six.iteritems(self.__dict__):
if isinstance(v, Config):
config_dict[k] = v.as_dict()
else:
config_dict[k] = copy.deepcopy(v)
return config_dict
# pylint: enable=protected-access
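# A short usage sketch for the Config class above (not part of the original file);
# the keys used here are illustrative only:
# >>> c = Config({'optimizer': 'sgd', 'nms_configs': {'method': 'gaussian'}})
# >>> c.override('optimizer=adam,nms_configs.sigma=0.5', allow_new_keys=True)
# >>> c.optimizer, c.nms_configs.sigma
# ('adam', 0.5)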
def default_detection_configs():
"""Returns a default detection configs."""
h = Config()
# model name.
h.name = 'efficientdet-d1'
# activation type: see activation_fn in utils.py.
h.act_type = 'swish'
# input preprocessing parameters
h.image_size = 640 # An integer or a string WxH such as 640x320.
h.target_size = None
h.input_rand_hflip = True
h.jitter_min = 0.1
h.jitter_max = 2.0
h.autoaugment_policy = None
h.grid_mask = False
h.sample_image = None
h.map_freq = 5 # AP eval frequency in epochs.
# dataset specific parameters
# TODO(tanmingxing): update this to be 91 for COCO, and 21 for pascal.
h.num_classes = 90 # 1+ actual classes, 0 is reserved for background.
h.seg_num_classes = 3 # segmentation classes
h.heads = ['object_detection'] # 'object_detection', 'segmentation'
h.skip_crowd_during_training = True
h.label_map = None # a dict or a string of 'coco', 'voc', 'waymo'.
h.max_instances_per_image = 100 # Default to 100 for COCO.
h.regenerate_source_id = False
# model architecture
h.min_level = 3
h.max_level = 7
h.num_scales = 3
# ratio w/h: 2.0 means w=1.4, h=0.7. Can be computed with k-mean per dataset.
h.aspect_ratios = [1.0, 2.0, 0.5] # [[0.7, 1.4], [1.0, 1.0], [1.4, 0.7]]
h.anchor_scale = 4.0
# is batchnorm training mode
h.is_training_bn = True
# optimization
h.momentum = 0.9
h.optimizer = 'sgd' # can be 'adam' or 'sgd'.
h.learning_rate = 0.08 # 0.008 for adam.
h.lr_warmup_init = 0.008 # 0.0008 for adam.
h.lr_warmup_epoch = 1.0
h.first_lr_drop_epoch = 200.0
h.second_lr_drop_epoch = 250.0
h.poly_lr_power = 0.9
h.clip_gradients_norm = 10.0
h.num_epochs = 300
h.data_format = 'channels_last'
# The default image normalization is identical to Cloud TPU ResNet.
h.mean_rgb = [0.485 * 255, 0.456 * 255, 0.406 * 255]
h.stddev_rgb = [0.229 * 255, 0.224 * 255, 0.225 * 255]
h.scale_range = False
# classification loss
h.label_smoothing = 0.0 # 0.1 is a good default
  # focal loss parameters
h.alpha = 0.25
h.gamma = 1.5
# localization loss
h.delta = 0.1 # regularization parameter of huber loss.
# total loss = box_loss * box_loss_weight + iou_loss * iou_loss_weight
h.box_loss_weight = 50.0
h.iou_loss_type = None
h.iou_loss_weight = 1.0
# regularization l2 loss.
h.weight_decay = 4e-5
h.strategy = None # 'tpu', 'gpus', None
h.mixed_precision = False # If False, use float32.
h.loss_scale = None # set to 2**16 enables dynamic loss scale
h.model_optimizations = {} # 'prune':{}
# For detection.
h.box_class_repeats = 3
h.fpn_cell_repeats = 3
h.fpn_num_filters = 88
h.separable_conv = True
h.apply_bn_for_resampling = True
h.conv_after_downsample = False
h.conv_bn_act_pattern = False
h.drop_remainder = True # drop remainder for the final batch eval.
# For post-processing nms, must be a dict.
h.nms_configs = {
'method': 'gaussian',
'iou_thresh': None, # use the default value based on method.
'score_thresh': 0.,
'sigma': None,
'pyfunc': False,
'max_nms_inputs': 0,
'max_output_size': 100,
}
h.tflite_max_detections = 100
# version.
h.fpn_name = None
h.fpn_weight_method = None
h.fpn_config = None
h.batch_norm_trainable = True
# No stochastic depth in default.
h.survival_prob = None
h.img_summary_steps = None
h.lr_decay_method = 'cosine'
h.moving_average_decay = 0.9998
h.ckpt_var_scope = None # ckpt variable scope.
# If true, skip loading pretrained weights if shape mismatches.
h.skip_mismatch = True
h.backbone_name = 'efficientnet-b1'
h.backbone_config = None
h.var_freeze_expr = None
# A temporary flag to switch between legacy and keras models.
h.use_keras_model = True
h.dataset_type = None
h.positives_momentum = None
h.grad_checkpoint = False
# Parameters for the Checkpoint Callback.
h.verbose = 1
h.save_freq = 'epoch'
return h
efficientdet_model_param_dict = {
'efficientdet-d0':
dict(
name='efficientdet-d0',
backbone_name='efficientnet-b0',
image_size=512,
fpn_num_filters=64,
fpn_cell_repeats=3,
box_class_repeats=3,
),
'efficientdet-d1':
dict(
name='efficientdet-d1',
backbone_name='efficientnet-b1',
image_size=640,
fpn_num_filters=88,
fpn_cell_repeats=4,
box_class_repeats=3,
),
'efficientdet-d2':
dict(
name='efficientdet-d2',
backbone_name='efficientnet-b2',
image_size=768,
fpn_num_filters=112,
fpn_cell_repeats=5,
box_class_repeats=3,
),
'efficientdet-d3':
dict(
name='efficientdet-d3',
backbone_name='efficientnet-b3',
image_size=896,
fpn_num_filters=160,
fpn_cell_repeats=6,
box_class_repeats=4,
),
'efficientdet-d4':
dict(
name='efficientdet-d4',
backbone_name='efficientnet-b4',
image_size=1024,
fpn_num_filters=224,
fpn_cell_repeats=7,
box_class_repeats=4,
),
'efficientdet-d5':
dict(
name='efficientdet-d5',
backbone_name='efficientnet-b5',
image_size=1280,
fpn_num_filters=288,
fpn_cell_repeats=7,
box_class_repeats=4,
),
'efficientdet-d6':
dict(
name='efficientdet-d6',
backbone_name='efficientnet-b6',
image_size=1280,
fpn_num_filters=384,
fpn_cell_repeats=8,
box_class_repeats=5,
fpn_weight_method='sum', # Use unweighted sum for stability.
),
'efficientdet-d7':
dict(
name='efficientdet-d7',
backbone_name='efficientnet-b6',
image_size=1536,
fpn_num_filters=384,
fpn_cell_repeats=8,
box_class_repeats=5,
anchor_scale=5.0,
fpn_weight_method='sum', # Use unweighted sum for stability.
),
'efficientdet-d7x':
dict(
name='efficientdet-d7x',
backbone_name='efficientnet-b7',
image_size=1536,
fpn_num_filters=384,
fpn_cell_repeats=8,
box_class_repeats=5,
anchor_scale=4.0,
max_level=8,
fpn_weight_method='sum', # Use unweighted sum for stability.
),
}
lite_common_param = dict(
mean_rgb=127.0,
stddev_rgb=128.0,
act_type='relu6',
fpn_weight_method='sum',
)
efficientdet_lite_param_dict = {
# lite models are in progress and subject to changes.
# mean_rgb and stddev_rgb are consistent with EfficientNet-Lite models in
# https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/lite/efficientnet_lite_builder.py#L28
'efficientdet-lite0':
dict(
name='efficientdet-lite0',
backbone_name='efficientnet-lite0',
image_size=320,
fpn_num_filters=64,
fpn_cell_repeats=3,
box_class_repeats=3,
anchor_scale=3.0,
**lite_common_param,
),
'efficientdet-lite1':
dict(
name='efficientdet-lite1',
backbone_name='efficientnet-lite1',
image_size=384,
fpn_num_filters=88,
fpn_cell_repeats=4,
box_class_repeats=3,
anchor_scale=3.0,
**lite_common_param,
),
'efficientdet-lite2':
dict(
name='efficientdet-lite2',
backbone_name='efficientnet-lite2',
image_size=448,
fpn_num_filters=112,
fpn_cell_repeats=5,
box_class_repeats=3,
anchor_scale=3.0,
**lite_common_param,
),
'efficientdet-lite3':
dict(
name='efficientdet-lite3',
backbone_name='efficientnet-lite3',
image_size=512,
fpn_num_filters=160,
fpn_cell_repeats=6,
box_class_repeats=4,
**lite_common_param,
),
'efficientdet-lite3x':
dict(
name='efficientdet-lite3x',
backbone_name='efficientnet-lite3',
image_size=640,
fpn_num_filters=200,
fpn_cell_repeats=6,
box_class_repeats=4,
anchor_scale=3.0,
**lite_common_param,
),
'efficientdet-lite4':
dict(
name='efficientdet-lite4',
backbone_name='efficientnet-lite4',
image_size=640,
fpn_num_filters=224,
fpn_cell_repeats=7,
box_class_repeats=4,
**lite_common_param,
),
}
def get_efficientdet_config(model_name='efficientdet-d1'):
"""Get the default config for EfficientDet based on model name."""
h = default_detection_configs()
if model_name in efficientdet_model_param_dict:
h.override(efficientdet_model_param_dict[model_name])
elif model_name in efficientdet_lite_param_dict:
h.override(efficientdet_lite_param_dict[model_name])
else:
raise ValueError('Unknown model name: {}'.format(model_name))
return h
def get_detection_config(model_name):
if model_name.startswith('efficientdet'):
return get_efficientdet_config(model_name)
else:
raise ValueError('model name must start with efficientdet.')
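# A short usage sketch (not part of the original file) for the helpers above;
# the override string is illustrative only:
# >>> config = get_efficientdet_config('efficientdet-d0')
# >>> config.override('num_classes=21,image_size=512')
# >>> config.num_classes, config.name
# (21, 'efficientdet-d0')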
|
tensorflow/examples
|
tensorflow_examples/lite/model_maker/third_party/efficientdet/hparams_config.py
|
Python
|
apache-2.0
| 15,188
|
[
"Gaussian"
] |
1af52ddcb0e51570089d331daaf8bd2dc2a67121a4f967b002d01215f88a5266
|
#!/usr/bin/env python
"""Compartment.py:
A compartment in moose.
Last modified: Tue May 13, 2014 06:03PM
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, NCBS Bangalore"
__credits__ = ["NCBS Bangalore", "Bhalla Lab"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import unittest
import math
import moose
import moose.utils as utils
class MooseCompartment():
"""A simple class for making MooseCompartment in moose"""
def __init__(self, path, length, diameter, args):
""" Initialize moose-compartment """
self.mc_ = None
self.path = path
# Following values are taken from Upi's chapter on Rallpacks
self.RM = args.get('RM', 4.0)
self.RA = args.get('RA', 1.0)
self.CM = args.get('CM', 0.01)
self.Em = args.get('Em', -0.065)
self.diameter = diameter
self.compLength = length
self.computeParams( )
try:
self.mc_ = moose.Compartment(self.path)
self.mc_.length = self.compLength
self.mc_.diameter = self.diameter
self.mc_.Ra = self.Ra
self.mc_.Rm = self.Rm
self.mc_.Cm = self.Cm
self.mc_.Em = self.Em
self.mc_.initVm = self.Em
except Exception as e:
utils.dump("ERROR"
, [ "Can't create compartment with path %s " % path
, "Failed with error %s " % e
]
)
raise
#utils.dump('DEBUG', [ 'Compartment: {}'.format( self ) ] )
def __repr__( self ):
msg = '{}: '.format( self.mc_.path )
msg += '\n\t|- Length: {:1.4e}, Diameter: {:1.4e}'.format(
self.mc_.length, self.mc_.diameter
)
# msg += '\n\t|- Cross-section: {:1.4e}, SurfaceArea: {:1.4e}'.format(
# self.crossSection, self.surfaceArea
# )
msg += '\n\t|- Ra: {:1.3e}, Rm: {:1.3e}, Cm: {:1.3e}, Em: {:1.3e}'.format(
self.mc_.Ra, self.mc_.Rm, self.mc_.Cm, self.mc_.Em
)
return msg
def __str__( self ):
return self.__repr__( )
def computeParams( self ):
        '''Compute essential parameters for the compartment.'''
self.surfaceArea = math.pi * self.compLength * self.diameter
self.crossSection = ( math.pi * self.diameter * self.diameter ) / 4.0
self.Ra = ( self.RA * self.compLength ) / self.crossSection
self.Rm = ( self.RM / self.surfaceArea )
self.Cm = ( self.CM * self.surfaceArea )
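    # Worked example (not part of the original file), using the Rallpack-style
    # defaults above (RA=1.0 Ohm*m, RM=4.0 Ohm*m^2, CM=0.01 F/m^2) for an assumed
    # compartment of length 100e-6 m and diameter 1e-6 m:
    #   surfaceArea  = pi * 1e-4 * 1e-6      ~= 3.14e-10 m^2
    #   crossSection = pi * (1e-6)**2 / 4    ~= 7.85e-13 m^2
    #   Ra = 1.0 * 1e-4 / 7.85e-13           ~= 1.27e8  Ohm
    #   Rm = 4.0 / 3.14e-10                  ~= 1.27e10 Ohm
    #   Cm = 0.01 * 3.14e-10                 ~= 3.14e-12 F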
class TestCompartment( unittest.TestCase):
''' Test class '''
    def setUp( self ):
        # MooseCompartment takes path, length, diameter and a dict of passive
        # parameters in its constructor; there is no separate createCompartment().
        self.dut = MooseCompartment( '/dut1', 100e-6, 1e-6, {} )
    def test_creation( self ):
        m = MooseCompartment( '/compartment1', 100e-6, 1e-6, {} )
        self.assertTrue( m.mc_
                , 'Always create compartments when parent is /.'
                )
        # Creating under a non-existent parent should fail inside __init__.
        with self.assertRaises( Exception ):
            MooseCompartment( '/model/compartment1', 100e-6, 1e-6, {} )
    def test_properties( self ):
        m = MooseCompartment( '/comp1', 100e-6, 1e-6, {} )
self.assertTrue( m.mc_.Em <= 0.0
, "Em is initialized to some positive value."
" Current value is %s " % m.mc_.Em
)
self.assertTrue( m.mc_.Rm >= 0.0
, "Rm should be initialized to non-zero positive float"
" Current value is: {}".format( m.mc_.Rm )
)
def test_repr ( self ):
print( self.dut )
if __name__ == "__main__":
unittest.main()
|
subhacom/moose-core
|
tests/python/Rallpacks/compartment.py
|
Python
|
gpl-3.0
| 3,980
|
[
"MOOSE"
] |
3c7832873f9d2cb2453e62b9bf597a59b986fb62e4d7525be51ebd6c5605bf94
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["multivariate_gaussian_samples", "nd_sort_samples"]
import numpy as np
from scipy.spatial import cKDTree
def multivariate_gaussian_samples(matrix, N, mean=None):
"""
Generate samples from a multidimensional Gaussian with a given covariance.
:param matrix: ``(k, k)``
The covariance matrix.
:param N:
The number of samples to generate.
:param mean: ``(k,)`` (optional)
The mean of the Gaussian. Assumed to be zero if not given.
:returns samples: ``(k,)`` or ``(N, k)``
Samples from the given multivariate normal.
"""
if mean is None:
mean = np.zeros(len(matrix))
samples = np.random.multivariate_normal(mean, matrix, N)
if N == 1:
return samples[0]
return samples
def nd_sort_samples(samples):
"""
Sort an N-dimensional list of samples using a KDTree.
:param samples: ``(nsamples, ndim)``
The list of samples. This must be a two-dimensional array.
:returns i: ``(nsamples,)``
The list of indices into the original array that return the correctly
sorted version.
"""
# Check the shape of the sample list.
assert len(samples.shape) == 2
# Build a KD-tree on the samples.
tree = cKDTree(samples)
# Compute the distances.
d, i = tree.query(samples[0], k=len(samples))
return i
def numerical_gradient(f, x, dx=1.234e-6):
g = np.empty_like(x, dtype=float)
for i in range(len(g)):
x[i] += dx
fp = f(x)
x[i] -= 2*dx
fm = f(x)
x[i] += dx
g[i] = 0.5 * (fp - fm) / dx
return g
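# A small usage sketch (not part of the original file): numerical_gradient uses a
# central difference, so for f(x) = sum(x**2) the result should be close to 2*x.
# >>> import numpy as np
# >>> f = lambda x: np.sum(x ** 2)
# >>> numerical_gradient(f, np.array([1.0, -2.0]))   # ~ array([ 2., -4.])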
|
andres-jordan/george
|
george/utils.py
|
Python
|
mit
| 1,691
|
[
"Gaussian"
] |
03e6de2344a48b0f4d6c7a0f10bd4485a7662d92fe3ac71d00663e1e2adc85ed
|
# Author: Travis Oliphant
# 1999 -- 2002
import operator
import math
import timeit
from scipy.spatial import cKDTree
from . import _sigtools, dlti
from ._upfirdn import upfirdn, _output_len, _upfirdn_modes
from scipy import linalg, fft as sp_fft
from scipy.fft._helper import _init_nd_shape_and_axes
from scipy._lib._util import prod as _prod
import numpy as np
from scipy.special import lambertw
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
from ._filter_design import cheby1, _validate_sos
from ._fir_filter_design import firwin
from ._sosfilt import _sosfilt
import warnings
__all__ = ['correlate', 'correlation_lags', 'correlate2d',
'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
'residuez', 'resample', 'resample_poly', 'detrend',
'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',
'filtfilt', 'decimate', 'vectorstrength']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
def _valfrommode(mode):
try:
return _modedict[mode]
except KeyError as e:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.") from e
def _bvalfromboundary(boundary):
try:
return _boundarydict[boundary] << 2
except KeyError as e:
raise ValueError("Acceptable boundary flags are 'fill', 'circular' "
"(or 'wrap'), and 'symmetric' (or 'symm').") from e
def _inputs_swap_needed(mode, shape1, shape2, axes=None):
"""Determine if inputs arrays need to be swapped in `"valid"` mode.
If in `"valid"` mode, returns whether or not the input arrays need to be
swapped depending on whether `shape1` is at least as large as `shape2` in
every calculated dimension.
This is important for some of the correlation and convolution
implementations in this module, where the larger array input needs to come
before the smaller array input when operating in this mode.
Note that if the mode provided is not 'valid', False is immediately
returned.
"""
if mode != 'valid':
return False
if not shape1:
return False
if axes is None:
axes = range(len(shape1))
ok1 = all(shape1[i] >= shape2[i] for i in axes)
ok2 = all(shape2[i] >= shape1[i] for i in axes)
if not (ok1 or ok2):
raise ValueError("For 'valid' mode, one must be at least "
"as large as the other in every dimension")
return not ok1
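# Illustrative behaviour (not part of the original file): in 'valid' mode the
# larger array must come first, so the inputs below would need to be swapped.
# >>> _inputs_swap_needed('valid', (5, 5), (8, 8))
# True
# >>> _inputs_swap_needed('full', (5, 5), (8, 8))
# False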
def correlate(in1, in2, mode='full', method='auto'):
r"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the correlation.
``direct``
The correlation is determined directly from sums, the definition of
correlation.
``fft``
The Fast Fourier Transform is used to perform the correlation more
quickly (only available for numerical arrays.)
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See `convolve` Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
See Also
--------
choose_conv_method : contains more documentation on `method`.
correlation_lags : calculates the lag / displacement indices array for 1D
cross-correlation.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as::
z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...])
This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')``
then
.. math::
z[k] = (x * y)(k - N + 1)
= \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*}
for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2`
where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`,
and :math:`y_m` is 0 when m is outside the range of y.
``method='fft'`` only works for numerical arrays as it relies on
`fftconvolve`. In certain cases (i.e., arrays of objects or when
rounding integers can lose precision), ``method='direct'`` is always used.
When using "same" mode with even-length inputs, the outputs of `correlate`
and `correlate2d` differ: There is a 1-index offset between them.
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + rng.standard_normal(len(sig))
>>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
>>> clock = np.arange(64, len(sig), 128)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(corr)
>>> ax_corr.plot(clock, corr[clock], 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.tight_layout()
>>> plt.show()
Compute the cross-correlation of a noisy signal with the original signal.
>>> x = np.arange(128) / 128
>>> sig = np.sin(2 * np.pi * x)
>>> sig_noise = sig + rng.standard_normal(len(sig))
>>> corr = signal.correlate(sig_noise, sig)
>>> lags = signal.correlation_lags(len(sig), len(sig_noise))
>>> corr /= np.max(corr)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, figsize=(4.8, 4.8))
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('Original signal')
>>> ax_orig.set_xlabel('Sample Number')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_noise.set_xlabel('Sample Number')
>>> ax_corr.plot(lags, corr)
>>> ax_corr.set_title('Cross-correlated signal')
>>> ax_corr.set_xlabel('Lag')
>>> ax_orig.margins(0, 0.1)
>>> ax_noise.margins(0, 0.1)
>>> ax_corr.margins(0, 0.1)
>>> fig.tight_layout()
>>> plt.show()
"""
in1 = np.asarray(in1)
in2 = np.asarray(in2)
if in1.ndim == in2.ndim == 0:
return in1 * in2.conj()
elif in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
# Don't use _valfrommode, since correlate should not accept numeric modes
try:
val = _modedict[mode]
except KeyError as e:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.") from e
# this either calls fftconvolve or this function with method=='direct'
if method in ('fft', 'auto'):
return convolve(in1, _reverse_and_conj(in2), mode, method)
elif method == 'direct':
# fastpath to faster numpy.correlate for 1d inputs when possible
if _np_conv_ok(in1, in2, mode):
return np.correlate(in1, in2, mode)
# _correlateND is far slower when in2.size > in1.size, so swap them
# and then undo the effect afterward if mode == 'full'. Also, it fails
# with 'valid' mode if in2 is larger than in1, so swap those, too.
# Don't swap inputs for 'same' mode, since shape of in1 matters.
swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or
_inputs_swap_needed(mode, in1.shape, in2.shape))
if swapped_inputs:
in1, in2 = in2, in1
if mode == 'valid':
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = _sigtools._correlateND(in1, in2, out, val)
else:
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = tuple(slice(0, i) for i in in1.shape)
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = _sigtools._correlateND(in1zpadded, in2, out, val)
if swapped_inputs:
# Reverse and conjugate to undo the effect of swapping inputs
z = _reverse_and_conj(z)
return z
else:
raise ValueError("Acceptable method flags are 'auto',"
" 'direct', or 'fft'.")
def correlation_lags(in1_len, in2_len, mode='full'):
r"""
Calculates the lag / displacement indices array for 1D cross-correlation.
Parameters
----------
    in1_len : int
        First input size.
    in2_len : int
        Second input size.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output.
See the documentation `correlate` for more information.
See Also
--------
correlate : Compute the N-dimensional cross-correlation.
Returns
-------
lags : array
Returns an array containing cross-correlation lag/displacement indices.
Indices can be indexed with the np.argmax of the correlation to return
the lag/displacement.
Notes
-----
Cross-correlation for continuous functions :math:`f` and :math:`g` is
defined as:
.. math::
\left ( f\star g \right )\left ( \tau \right )
\triangleq \int_{t_0}^{t_0 +T}
\overline{f\left ( t \right )}g\left ( t+\tau \right )dt
Where :math:`\tau` is defined as the displacement, also known as the lag.
Cross correlation for discrete functions :math:`f` and :math:`g` is
defined as:
.. math::
\left ( f\star g \right )\left [ n \right ]
\triangleq \sum_{-\infty}^{\infty}
\overline{f\left [ m \right ]}g\left [ m+n \right ]
Where :math:`n` is the lag.
Examples
--------
Cross-correlation of a signal with its time-delayed self.
>>> from scipy import signal
>>> from numpy.random import default_rng
>>> rng = default_rng()
>>> x = rng.standard_normal(1000)
>>> y = np.concatenate([rng.standard_normal(100), x])
>>> correlation = signal.correlate(x, y, mode="full")
>>> lags = signal.correlation_lags(x.size, y.size, mode="full")
>>> lag = lags[np.argmax(correlation)]
"""
# calculate lag ranges in different modes of operation
if mode == "full":
# the output is the full discrete linear convolution
# of the inputs. (Default)
lags = np.arange(-in2_len + 1, in1_len)
elif mode == "same":
# the output is the same size as `in1`, centered
# with respect to the 'full' output.
# calculate the full output
lags = np.arange(-in2_len + 1, in1_len)
# determine the midpoint in the full output
mid = lags.size // 2
# determine lag_bound to be used with respect
# to the midpoint
lag_bound = in1_len // 2
# calculate lag ranges for even and odd scenarios
if in1_len % 2 == 0:
lags = lags[(mid-lag_bound):(mid+lag_bound)]
else:
lags = lags[(mid-lag_bound):(mid+lag_bound)+1]
elif mode == "valid":
# the output consists only of those elements that do not
# rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
# must be at least as large as the other in every dimension.
# the lag_bound will be either negative or positive
        # this lets us infer how to present the lag range
lag_bound = in1_len - in2_len
if lag_bound >= 0:
lags = np.arange(lag_bound + 1)
else:
lags = np.arange(lag_bound, 1)
return lags
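# Illustrative values (not part of the original file): for two short signals the
# lag axis in 'full' mode runs from -(in2_len - 1) to in1_len - 1.
# >>> correlation_lags(4, 3, mode='full')
# array([-2, -1,  0,  1,  2,  3])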
def _centered(arr, newshape):
# Return the center newshape portion of the array.
newshape = np.asarray(newshape)
currshape = np.array(arr.shape)
startind = (currshape - newshape) // 2
endind = startind + newshape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False):
"""Handle the axes argument for frequency-domain convolution.
Returns the inputs and axes in a standard form, eliminating redundant axes,
swapping the inputs if necessary, and checking for various potential
errors.
Parameters
----------
in1 : array
First input.
in2 : array
Second input.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output.
See the documentation `fftconvolve` for more information.
axes : list of ints
Axes over which to compute the FFTs.
sorted_axes : bool, optional
If `True`, sort the axes.
Default is `False`, do not sort.
Returns
-------
    in1 : array
        The first input, possibly swapped with the second input.
    in2 : array
        The second input, possibly swapped with the first input.
axes : list of ints
Axes over which to compute the FFTs.
"""
s1 = in1.shape
s2 = in2.shape
noaxes = axes is None
_, axes = _init_nd_shape_and_axes(in1, shape=None, axes=axes)
if not noaxes and not len(axes):
raise ValueError("when provided, axes cannot be empty")
    # Axes of length 1 can rely on broadcasting rules for multiply,
# no fft needed.
axes = [a for a in axes if s1[a] != 1 and s2[a] != 1]
if sorted_axes:
axes.sort()
if not all(s1[a] == s2[a] or s1[a] == 1 or s2[a] == 1
for a in range(in1.ndim) if a not in axes):
raise ValueError("incompatible shapes for in1 and in2:"
" {0} and {1}".format(s1, s2))
# Check that input sizes are compatible with 'valid' mode.
if _inputs_swap_needed(mode, s1, s2, axes=axes):
# Convolution is commutative; order doesn't have any effect on output.
in1, in2 = in2, in1
return in1, in2, axes
def _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=False):
"""Convolve two arrays in the frequency domain.
    This function implements only the base FFT-related operations.
Specifically, it converts the signals to the frequency domain, multiplies
them, then converts them back to the time domain. Calculations of axes,
shapes, convolution mode, etc. are implemented in higher level-functions,
such as `fftconvolve` and `oaconvolve`. Those functions should be used
instead of this one.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
axes : array_like of ints
Axes over which to compute the FFTs.
shape : array_like of ints
The sizes of the FFTs.
calc_fast_len : bool, optional
If `True`, set each value of `shape` to the next fast FFT length.
        Default is `False`, use `shape` as-is.
Returns
-------
out : array
An N-dimensional array containing the discrete linear convolution of
`in1` with `in2`.
"""
if not len(axes):
return in1 * in2
complex_result = (in1.dtype.kind == 'c' or in2.dtype.kind == 'c')
if calc_fast_len:
# Speed up FFT by padding to optimal size.
fshape = [
sp_fft.next_fast_len(shape[a], not complex_result) for a in axes]
else:
fshape = shape
if not complex_result:
fft, ifft = sp_fft.rfftn, sp_fft.irfftn
else:
fft, ifft = sp_fft.fftn, sp_fft.ifftn
sp1 = fft(in1, fshape, axes=axes)
sp2 = fft(in2, fshape, axes=axes)
ret = ifft(sp1 * sp2, fshape, axes=axes)
if calc_fast_len:
fslice = tuple([slice(sz) for sz in shape])
ret = ret[fslice]
return ret
def _apply_conv_mode(ret, s1, s2, mode, axes):
"""Calculate the convolution result shape based on the `mode` argument.
Returns the result sliced to the correct size for the given mode.
Parameters
----------
ret : array
The result array, with the appropriate shape for the 'full' mode.
s1 : list of int
The shape of the first input.
s2 : list of int
The shape of the second input.
mode : str {'full', 'valid', 'same'}
A string indicating the size of the output.
        See the documentation of `fftconvolve` for more information.
axes : list of ints
Axes over which to compute the convolution.
Returns
-------
ret : array
        A copy of `ret`, sliced to the correct size for the given `mode`.
"""
if mode == "full":
return ret.copy()
elif mode == "same":
return _centered(ret, s1).copy()
elif mode == "valid":
shape_valid = [ret.shape[a] if a not in axes else s1[a] - s2[a] + 1
for a in range(ret.ndim)]
return _centered(ret, shape_valid).copy()
else:
raise ValueError("acceptable mode flags are 'valid',"
" 'same', or 'full'")
def fftconvolve(in1, in2, mode="full", axes=None):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
As of v0.19, `convolve` automatically chooses this method or the direct
method based on an estimation of which is faster.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
axes : int or array_like of ints or None, optional
Axes over which to compute the convolution.
The default is over all axes.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See Also
--------
convolve : Uses the direct convolution or FFT convolution algorithm
depending on which is faster.
oaconvolve : Uses the overlap-add method to do convolution, which is
generally faster when the input arrays are large and
significantly different in size.
Examples
--------
Autocorrelation of white noise is an impulse.
>>> from scipy import signal
>>> rng = np.random.default_rng()
>>> sig = rng.standard_normal(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.tight_layout()
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> face = misc.face(gray=True)
>>> kernel = np.outer(signal.windows.gaussian(70, 8),
... signal.windows.gaussian(70, 8))
>>> blurred = signal.fftconvolve(face, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
"""
in1 = np.asarray(in1)
in2 = np.asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return np.array([])
in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes,
sorted_axes=False)
s1 = in1.shape
s2 = in2.shape
shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1
for i in range(in1.ndim)]
ret = _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=True)
return _apply_conv_mode(ret, s1, s2, mode, axes)
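# Minimal cross-check sketch (an assumption, not SciPy test code): on a small
# 1-D example the FFT-based path above should match NumPy's direct convolution
# to floating-point accuracy.
def _demo_fftconvolve_matches_direct(seed=0):
    """Compare `fftconvolve` with ``np.convolve`` on small random inputs."""
    rng = np.random.default_rng(seed)
    a = rng.standard_normal(128)
    b = rng.standard_normal(17)
    return np.allclose(fftconvolve(a, b, mode="full"),
                       np.convolve(a, b, mode="full"))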
def _calc_oa_lens(s1, s2):
"""Calculate the optimal FFT lengths for overlapp-add convolution.
The calculation is done for a single dimension.
Parameters
----------
s1 : int
Size of the dimension for the first array.
s2 : int
Size of the dimension for the second array.
Returns
-------
block_size : int
The size of the FFT blocks.
overlap : int
The amount of overlap between two blocks.
in1_step : int
The size of each step for the first array.
in2_step : int
        The size of each step for the second array.
"""
# Set up the arguments for the conventional FFT approach.
fallback = (s1+s2-1, None, s1, s2)
    # Use conventional FFT convolve if the sizes are the same or either is 1.
if s1 == s2 or s1 == 1 or s2 == 1:
return fallback
if s2 > s1:
s1, s2 = s2, s1
swapped = True
else:
swapped = False
    # There cannot be a useful block size if s2 is at least half of s1.
if s2 >= s1/2:
return fallback
# Derivation of optimal block length
# For original formula see:
# https://en.wikipedia.org/wiki/Overlap-add_method
#
# Formula:
# K = overlap = s2-1
# N = block_size
# C = complexity
# e = exponential, exp(1)
#
# C = (N*(log2(N)+1))/(N-K)
# C = (N*log2(2N))/(N-K)
# C = N/(N-K) * log2(2N)
# C1 = N/(N-K)
# C2 = log2(2N) = ln(2N)/ln(2)
#
# dC1/dN = (1*(N-K)-N)/(N-K)^2 = -K/(N-K)^2
# dC2/dN = 2/(2*N*ln(2)) = 1/(N*ln(2))
#
# dC/dN = dC1/dN*C2 + dC2/dN*C1
# dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + N/(N*ln(2)*(N-K))
# dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + 1/(ln(2)*(N-K))
# dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + (N-K)/(ln(2)*(N-K)^2)
    # dC/dN = (-K*ln(2N) + (N-K))/(ln(2)*(N-K)^2)
# dC/dN = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2)
#
# Solve for minimum, where dC/dN = 0
# 0 = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2)
# 0 * ln(2)*(N-K)^2 = N - K*ln(2N) - K
# 0 = N - K*ln(2N) - K
# 0 = N - K*(ln(2N) + 1)
# 0 = N - K*ln(2Ne)
# N = K*ln(2Ne)
# N/K = ln(2Ne)
#
# e^(N/K) = e^ln(2Ne)
# e^(N/K) = 2Ne
# 1/e^(N/K) = 1/(2*N*e)
# e^(N/-K) = 1/(2*N*e)
# e^(N/-K) = K/N*1/(2*K*e)
# N/K*e^(N/-K) = 1/(2*e*K)
# N/-K*e^(N/-K) = -1/(2*e*K)
#
# Using Lambert W function
# https://en.wikipedia.org/wiki/Lambert_W_function
# x = W(y) It is the solution to y = x*e^x
# x = N/-K
# y = -1/(2*e*K)
#
# N/-K = W(-1/(2*e*K))
#
# N = -K*W(-1/(2*e*K))
overlap = s2-1
opt_size = -overlap*lambertw(-1/(2*math.e*overlap), k=-1).real
block_size = sp_fft.next_fast_len(math.ceil(opt_size))
# Use conventional FFT convolve if there is only going to be one block.
if block_size >= s1:
return fallback
if not swapped:
in1_step = block_size-s2+1
in2_step = s2
else:
in1_step = s2
in2_step = block_size-s2+1
return block_size, overlap, in1_step, in2_step
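# Worked numerical sketch of the derivation above (illustrative only): for a
# kernel of length ``s2`` the overlap is K = s2 - 1 and the cost-optimal block
# length is N = -K * W_{-1}(-1 / (2 e K)), which `_calc_oa_lens` then rounds up
# to the next fast FFT length. The helper name is an assumption.
def _demo_optimal_block_length(s1=100000, s2=512):
    """Return (real-valued optimum N, block size actually chosen)."""
    overlap = s2 - 1
    opt_size = -overlap * lambertw(-1 / (2 * math.e * overlap), k=-1).real
    block_size, _, _, _ = _calc_oa_lens(s1, s2)
    return opt_size, block_size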
def oaconvolve(in1, in2, mode="full", axes=None):
"""Convolve two N-dimensional arrays using the overlap-add method.
Convolve `in1` and `in2` using the overlap-add method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
and generally much faster than `fftconvolve` when one array is much
larger than the other, but can be slower when only a few output values are
needed or when the arrays are very similar in shape, and can only
output float arrays (int or object array inputs will be cast to float).
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
axes : int or array_like of ints or None, optional
Axes over which to compute the convolution.
The default is over all axes.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See Also
--------
convolve : Uses the direct convolution or FFT convolution algorithm
depending on which is faster.
fftconvolve : An implementation of convolution using FFT.
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Convolve a 100,000 sample signal with a 512-sample filter.
>>> from scipy import signal
>>> rng = np.random.default_rng()
>>> sig = rng.standard_normal(100000)
>>> filt = signal.firwin(512, 0.01)
>>> fsig = signal.oaconvolve(sig, filt)
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(fsig)
>>> ax_mag.set_title('Filtered noise')
>>> fig.tight_layout()
>>> fig.show()
References
----------
.. [1] Wikipedia, "Overlap-add_method".
https://en.wikipedia.org/wiki/Overlap-add_method
.. [2] Richard G. Lyons. Understanding Digital Signal Processing,
Third Edition, 2011. Chapter 13.10.
ISBN 13: 978-0137-02741-5
"""
in1 = np.asarray(in1)
in2 = np.asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return np.array([])
elif in1.shape == in2.shape: # Equivalent to fftconvolve
return fftconvolve(in1, in2, mode=mode, axes=axes)
in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes,
sorted_axes=True)
s1 = in1.shape
s2 = in2.shape
if not axes:
ret = in1 * in2
return _apply_conv_mode(ret, s1, s2, mode, axes)
# Calculate this now since in1 is changed later
shape_final = [None if i not in axes else
s1[i] + s2[i] - 1 for i in range(in1.ndim)]
# Calculate the block sizes for the output, steps, first and second inputs.
# It is simpler to calculate them all together than doing them in separate
# loops due to all the special cases that need to be handled.
optimal_sizes = ((-1, -1, s1[i], s2[i]) if i not in axes else
_calc_oa_lens(s1[i], s2[i]) for i in range(in1.ndim))
block_size, overlaps, \
in1_step, in2_step = zip(*optimal_sizes)
# Fall back to fftconvolve if there is only one block in every dimension.
if in1_step == s1 and in2_step == s2:
return fftconvolve(in1, in2, mode=mode, axes=axes)
# Figure out the number of steps and padding.
# This would get too complicated in a list comprehension.
nsteps1 = []
nsteps2 = []
pad_size1 = []
pad_size2 = []
for i in range(in1.ndim):
if i not in axes:
pad_size1 += [(0, 0)]
pad_size2 += [(0, 0)]
continue
if s1[i] > in1_step[i]:
curnstep1 = math.ceil((s1[i]+1)/in1_step[i])
if (block_size[i] - overlaps[i])*curnstep1 < shape_final[i]:
curnstep1 += 1
curpad1 = curnstep1*in1_step[i] - s1[i]
else:
curnstep1 = 1
curpad1 = 0
if s2[i] > in2_step[i]:
curnstep2 = math.ceil((s2[i]+1)/in2_step[i])
if (block_size[i] - overlaps[i])*curnstep2 < shape_final[i]:
curnstep2 += 1
curpad2 = curnstep2*in2_step[i] - s2[i]
else:
curnstep2 = 1
curpad2 = 0
nsteps1 += [curnstep1]
nsteps2 += [curnstep2]
pad_size1 += [(0, curpad1)]
pad_size2 += [(0, curpad2)]
# Pad the array to a size that can be reshaped to the desired shape
# if necessary.
if not all(curpad == (0, 0) for curpad in pad_size1):
in1 = np.pad(in1, pad_size1, mode='constant', constant_values=0)
if not all(curpad == (0, 0) for curpad in pad_size2):
in2 = np.pad(in2, pad_size2, mode='constant', constant_values=0)
# Reshape the overlap-add parts to input block sizes.
split_axes = [iax+i for i, iax in enumerate(axes)]
fft_axes = [iax+1 for iax in split_axes]
# We need to put each new dimension before the corresponding dimension
# being reshaped in order to get the data in the right layout at the end.
reshape_size1 = list(in1_step)
reshape_size2 = list(in2_step)
for i, iax in enumerate(split_axes):
reshape_size1.insert(iax, nsteps1[i])
reshape_size2.insert(iax, nsteps2[i])
in1 = in1.reshape(*reshape_size1)
in2 = in2.reshape(*reshape_size2)
# Do the convolution.
fft_shape = [block_size[i] for i in axes]
ret = _freq_domain_conv(in1, in2, fft_axes, fft_shape, calc_fast_len=False)
# Do the overlap-add.
for ax, ax_fft, ax_split in zip(axes, fft_axes, split_axes):
overlap = overlaps[ax]
if overlap is None:
continue
ret, overpart = np.split(ret, [-overlap], ax_fft)
overpart = np.split(overpart, [-1], ax_split)[0]
ret_overpart = np.split(ret, [overlap], ax_fft)[0]
ret_overpart = np.split(ret_overpart, [1], ax_split)[1]
ret_overpart += overpart
# Reshape back to the correct dimensionality.
shape_ret = [ret.shape[i] if i not in fft_axes else
ret.shape[i]*ret.shape[i-1]
for i in range(ret.ndim) if i not in split_axes]
ret = ret.reshape(*shape_ret)
# Slice to the correct size.
slice_final = tuple([slice(islice) for islice in shape_final])
ret = ret[slice_final]
return _apply_conv_mode(ret, s1, s2, mode, axes)
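# Equivalence sketch (an assumption, not a SciPy unit test): the overlap-add
# result above should agree with plain FFT convolution; only the cost differs.
def _demo_oaconvolve_matches_fftconvolve(seed=0):
    """Check that `oaconvolve` and `fftconvolve` agree on a long 1-D signal."""
    rng = np.random.default_rng(seed)
    sig = rng.standard_normal(10000)
    filt = rng.standard_normal(128)
    return np.allclose(oaconvolve(sig, filt, mode="full"),
                       fftconvolve(sig, filt, mode="full"))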
def _numeric_arrays(arrays, kinds='buifc'):
"""
See if a list of arrays are all numeric.
Parameters
----------
    arrays : ndarray or list of ndarrays
        Arrays to check for a numeric dtype.
    kinds : string-like
        The dtype kinds to accept. If the ``dtype.kind`` of any of the
        arrays is not in this string, the function returns False;
        otherwise it returns True.
"""
if type(arrays) == np.ndarray:
return arrays.dtype.kind in kinds
for array_ in arrays:
if array_.dtype.kind not in kinds:
return False
return True
def _conv_ops(x_shape, h_shape, mode):
"""
Find the number of operations required for direct/fft methods of
    convolution. The direct operation count was measured with a dummy class
    that counts operations by overriding ``__mul__`` and ``__add__``.
The FFT operations rely on the (well-known) computational complexity of the
FFT (and the implementation of ``_freq_domain_conv``).
"""
if mode == "full":
out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]
elif mode == "valid":
out_shape = [abs(n - k) + 1 for n, k in zip(x_shape, h_shape)]
elif mode == "same":
out_shape = x_shape
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full', not mode={}".format(mode))
s1, s2 = x_shape, h_shape
if len(x_shape) == 1:
s1, s2 = s1[0], s2[0]
if mode == "full":
direct_ops = s1 * s2
elif mode == "valid":
direct_ops = (s2 - s1 + 1) * s1 if s2 >= s1 else (s1 - s2 + 1) * s2
elif mode == "same":
direct_ops = (s1 * s2 if s1 < s2 else
s1 * s2 - (s2 // 2) * ((s2 + 1) // 2))
else:
if mode == "full":
direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)
elif mode == "valid":
direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)
elif mode == "same":
direct_ops = _prod(s1) * _prod(s2)
full_out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]
N = _prod(full_out_shape)
fft_ops = 3 * N * np.log(N) # 3 separate FFTs of size full_out_shape
return fft_ops, direct_ops
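# Small illustrative sketch of the cost model above: for a fixed signal length
# the direct count grows with the kernel size, while the FFT count depends only
# on the full output length. The helper name is an assumption.
def _demo_conv_op_counts(n=4096):
    """Return ``{kernel_size: (fft_ops, direct_ops)}`` for a few 1-D kernels."""
    return {k: _conv_ops((n,), (k,), "full") for k in (8, 64, 512)}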
def _fftconv_faster(x, h, mode):
"""
See if using fftconvolve or convolve is faster.
Parameters
----------
x : np.ndarray
Signal
h : np.ndarray
Kernel
mode : str
Mode passed to convolve
Returns
-------
fft_faster : bool
Notes
-----
See docstring of `choose_conv_method` for details on tuning hardware.
See pull request 11031 for more detail:
https://github.com/scipy/scipy/pull/11031.
"""
fft_ops, direct_ops = _conv_ops(x.shape, h.shape, mode)
offset = -1e-3 if x.ndim == 1 else -1e-4
constants = {
"valid": (1.89095737e-9, 2.1364985e-10, offset),
"full": (1.7649070e-9, 2.1414831e-10, offset),
"same": (3.2646654e-9, 2.8478277e-10, offset)
if h.size <= x.size
else (3.21635404e-9, 1.1773253e-8, -1e-5),
} if x.ndim == 1 else {
"valid": (1.85927e-9, 2.11242e-8, offset),
"full": (1.99817e-9, 1.66174e-8, offset),
"same": (2.04735e-9, 1.55367e-8, offset),
}
O_fft, O_direct, O_offset = constants[mode]
return O_fft * fft_ops < O_direct * direct_ops + O_offset
def _reverse_and_conj(x):
"""
Reverse array `x` in all dimensions and perform the complex conjugate
"""
reverse = (slice(None, None, -1),) * x.ndim
return x[reverse].conj()
def _np_conv_ok(volume, kernel, mode):
"""
See if numpy supports convolution of `volume` and `kernel` (i.e. both are
1D ndarrays and of the appropriate shape). NumPy's 'same' mode uses the
size of the larger input, while SciPy's uses the size of the first input.
Invalid mode strings will return False and be caught by the calling func.
"""
if volume.ndim == kernel.ndim == 1:
if mode in ('full', 'valid'):
return True
elif mode == 'same':
return volume.size >= kernel.size
else:
return False
def _timeit_fast(stmt="pass", setup="pass", repeat=3):
"""
Returns the time the statement/function took, in seconds.
Faster, less precise version of IPython's timeit. `stmt` can be a statement
written as a string or a callable.
Will do only 1 loop (like IPython's timeit) with no repetitions
(unlike IPython) for very slow functions. For fast functions, only does
enough loops to take 5 ms, which seems to produce similar results (on
Windows at least), and avoids doing an extraneous cycle that isn't
measured.
"""
timer = timeit.Timer(stmt, setup)
# determine number of calls per rep so total time for 1 rep >= 5 ms
x = 0
for p in range(0, 10):
number = 10**p
x = timer.timeit(number) # seconds
if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one
break
if x > 1: # second
# If it's macroscopic, don't bother with repetitions
best = x
else:
number *= 10
r = timer.repeat(repeat, number)
best = min(r)
sec = best / number
return sec
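# Usage sketch (assumption): time a small convolution with the fast timer
# above. The returned value is seconds per call, which is how
# `choose_conv_method` compares the 'direct' and 'fft' paths when measuring.
def _demo_time_small_convolution(seed=0):
    rng = np.random.default_rng(seed)
    a, b = rng.standard_normal(256), rng.standard_normal(32)
    return _timeit_fast(lambda: np.convolve(a, b))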
def choose_conv_method(in1, in2, mode='full', measure=False):
"""
Find the fastest convolution/correlation method.
This primarily exists to be called during the ``method='auto'`` option in
`convolve` and `correlate`. It can also be used to determine the value of
``method`` for many different convolutions of the same dtype/shape.
In addition, it supports timing the convolution to adapt the value of
``method`` to a particular set of inputs and/or hardware.
Parameters
----------
in1 : array_like
The first argument passed into the convolution function.
in2 : array_like
The second argument passed into the convolution function.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
measure : bool, optional
If True, run and time the convolution of `in1` and `in2` with both
methods and return the fastest. If False (default), predict the fastest
method using precomputed values.
Returns
-------
method : str
A string indicating which convolution method is fastest, either
'direct' or 'fft'
times : dict, optional
A dictionary containing the times (in seconds) needed for each method.
This value is only returned if ``measure=True``.
See Also
--------
convolve
correlate
Notes
-----
Generally, this method is 99% accurate for 2D signals and 85% accurate
for 1D signals for randomly chosen input sizes. For precision, use
``measure=True`` to find the fastest method by timing the convolution.
This can be used to avoid the minimal overhead of finding the fastest
``method`` later, or to adapt the value of ``method`` to a particular set
of inputs.
Experiments were run on an Amazon EC2 r5a.2xlarge machine to test this
function. These experiments measured the ratio between the time required
when using ``method='auto'`` and the time required for the fastest method
(i.e., ``ratio = time_auto / min(time_fft, time_direct)``). In these
experiments, we found:
* There is a 95% chance of this ratio being less than 1.5 for 1D signals
and a 99% chance of being less than 2.5 for 2D signals.
* The ratio was always less than 2.5/5 for 1D/2D signals respectively.
* This function is most inaccurate for 1D convolutions that take between 1
and 10 milliseconds with ``method='direct'``. A good proxy for this
(at least in our experiments) is ``1e6 <= in1.size * in2.size <= 1e7``.
The 2D results almost certainly generalize to 3D/4D/etc because the
implementation is the same (the 1D implementation is different).
All the numbers above are specific to the EC2 machine. However, we did find
    that this function generalizes fairly decently across hardware. The speed
    tests were of similar quality to (and even slightly better than) the same
    tests performed on the machine used to tune this function's numbers (a
    mid-2014 15-inch MacBook Pro with 16GB RAM and a 2.5GHz Intel i7 processor).
    There are cases when `fftconvolve` supports the inputs but this function
    returns `direct` (e.g., to protect integer inputs from the loss of
    precision that can occur when they are represented as floats).
.. versionadded:: 0.19
Examples
--------
Estimate the fastest method for a given input:
>>> from scipy import signal
>>> rng = np.random.default_rng()
>>> img = rng.random((32, 32))
>>> filter = rng.random((8, 8))
>>> method = signal.choose_conv_method(img, filter, mode='same')
>>> method
'fft'
This can then be applied to other arrays of the same dtype and shape:
>>> img2 = rng.random((32, 32))
>>> filter2 = rng.random((8, 8))
>>> corr2 = signal.correlate(img2, filter2, mode='same', method=method)
>>> conv2 = signal.convolve(img2, filter2, mode='same', method=method)
The output of this function (``method``) works with `correlate` and
`convolve`.
"""
volume = np.asarray(in1)
kernel = np.asarray(in2)
if measure:
times = {}
for method in ['fft', 'direct']:
times[method] = _timeit_fast(lambda: convolve(volume, kernel,
mode=mode, method=method))
chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct'
return chosen_method, times
# for integer input,
# catch when more precision required than float provides (representing an
# integer as float can lose precision in fftconvolve if larger than 2**52)
if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
max_value *= int(min(volume.size, kernel.size))
if max_value > 2**np.finfo('float').nmant - 1:
return 'direct'
if _numeric_arrays([volume, kernel], kinds='b'):
return 'direct'
if _numeric_arrays([volume, kernel]):
if _fftconv_faster(volume, kernel, mode):
return 'fft'
return 'direct'
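# Hedged usage sketch: with ``measure=True`` the function above returns both
# the chosen method and the measured timings, which is useful when the same
# dtype/shape combination will be convolved many times. The helper name and
# sizes are assumptions.
def _demo_choose_and_time(seed=0):
    rng = np.random.default_rng(seed)
    img = rng.random((64, 64))
    kern = rng.random((8, 8))
    method, times = choose_conv_method(img, kern, mode='same', measure=True)
    return method, times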
def convolve(in1, in2, mode='full', method='auto'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the convolution.
``direct``
The convolution is determined directly from sums, the definition of
convolution.
``fft``
The Fourier Transform is used to perform the convolution by calling
`fftconvolve`.
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See Also
--------
numpy.polymul : performs polynomial multiplication (same operation, but
also accepts poly1d objects)
choose_conv_method : chooses the fastest appropriate convolution method
fftconvolve : Always uses the FFT method.
oaconvolve : Uses the overlap-add method to do convolution, which is
generally faster when the input arrays are large and
significantly different in size.
Notes
-----
By default, `convolve` and `correlate` use ``method='auto'``, which calls
`choose_conv_method` to choose the fastest method using pre-computed
values (`choose_conv_method` can also measure real-world timing with a
keyword argument). Because `fftconvolve` relies on floating point numbers,
there are certain constraints that may force `method=direct` (more detail
in `choose_conv_method` docstring).
Examples
--------
Smooth a square pulse using a Hann window:
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 0.], 100)
>>> win = signal.windows.hann(50)
>>> filtered = signal.convolve(sig, win, mode='same') / sum(win)
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('Original pulse')
>>> ax_orig.margins(0, 0.1)
>>> ax_win.plot(win)
>>> ax_win.set_title('Filter impulse response')
>>> ax_win.margins(0, 0.1)
>>> ax_filt.plot(filtered)
>>> ax_filt.set_title('Filtered signal')
>>> ax_filt.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
volume = np.asarray(in1)
kernel = np.asarray(in2)
if volume.ndim == kernel.ndim == 0:
return volume * kernel
elif volume.ndim != kernel.ndim:
raise ValueError("volume and kernel should have the same "
"dimensionality")
if _inputs_swap_needed(mode, volume.shape, kernel.shape):
# Convolution is commutative; order doesn't have any effect on output
volume, kernel = kernel, volume
if method == 'auto':
method = choose_conv_method(volume, kernel, mode=mode)
if method == 'fft':
out = fftconvolve(volume, kernel, mode=mode)
result_type = np.result_type(volume, kernel)
if result_type.kind in {'u', 'i'}:
out = np.around(out)
return out.astype(result_type)
elif method == 'direct':
        # fast path: use the faster numpy.convolve for 1-D inputs when possible
if _np_conv_ok(volume, kernel, mode):
return np.convolve(volume, kernel, mode)
return correlate(volume, _reverse_and_conj(kernel), mode, 'direct')
else:
raise ValueError("Acceptable method flags are 'auto',"
" 'direct', or 'fft'.")
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-D array.
    Perform an order filter on the input array. The `domain` argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `a`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `a`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = np.asarray(domain)
for dimsize in domain.shape:
if (dimsize % 2) != 1:
raise ValueError("Each dimension of domain argument "
"should have an odd number of elements.")
return _sigtools._order_filterND(a, domain, rank)
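# Minimal sketch of the rank semantics described above (helper name is an
# assumption): with a full 3x3 domain, rank 0 is a minimum filter, rank 4 the
# median, and rank 8 a maximum filter.
def _demo_order_filter_ranks():
    x = np.arange(25, dtype=float).reshape(5, 5)
    domain = np.ones((3, 3))
    return (order_filter(x, domain, 0),   # minimum of each 3x3 neighbourhood
            order_filter(x, domain, 4),   # median (5th of 9 sorted values)
            order_filter(x, domain, 8))   # maximum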
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`. The array will automatically be zero-padded.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
Warns
-----
UserWarning
If array size is smaller than kernel size along any dimension
See Also
--------
scipy.ndimage.median_filter
scipy.signal.medfilt2d
Notes
-----
The more general function `scipy.ndimage.median_filter` has a more
efficient implementation of a median filter and therefore runs much faster.
For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes,
the specialised function `scipy.signal.medfilt2d` may be faster.
"""
volume = np.atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * volume.ndim
kernel_size = np.asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(volume.ndim):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
if any(k > s for k, s in zip(kernel_size, volume.shape)):
warnings.warn('kernel_size exceeds volume extent: the volume will be '
'zero-padded.')
domain = np.ones(kernel_size, dtype=volume.dtype)
numels = np.prod(kernel_size, axis=0)
order = numels // 2
return _sigtools._order_filterND(volume, domain, order)
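# Illustrative sketch: a size-3 median window removes an isolated outlier while
# leaving a slowly varying signal essentially untouched.
def _demo_medfilt_despike():
    x = np.linspace(0.0, 1.0, 50)
    x[10] = 10.0          # single impulse ("salt" noise)
    return medfilt(x, kernel_size=3)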
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or array_like, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
Examples
--------
>>> from scipy.misc import face
>>> from scipy.signal import wiener
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> rng = np.random.default_rng()
>>> img = rng.random((40, 40)) #Create a random image
>>> filtered_img = wiener(img, (5, 5)) #Filter the image
>>> f, (plot1, plot2) = plt.subplots(1, 2)
>>> plot1.imshow(img)
>>> plot2.imshow(filtered_img)
>>> plt.show()
Notes
-----
This implementation is similar to wiener2 in Matlab/Octave.
For more details see [1]_
References
----------
.. [1] Lim, Jae S., Two-Dimensional Signal and Image Processing,
Englewood Cliffs, NJ, Prentice Hall, 1990, p. 548.
"""
im = np.asarray(im)
if mysize is None:
mysize = [3] * im.ndim
mysize = np.asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, np.ones(mysize), 'same') / np.prod(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, np.ones(mysize), 'same') /
np.prod(mysize, axis=0) - lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = np.mean(np.ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = np.where(lVar < noise, lMean, res)
return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Compute the gradient of an image by 2D convolution with a complex Scharr
operator. (Horizontal operator is real, vertical is imaginary.) Use
symmetric boundary condition to avoid creating edges at the image
boundaries.
>>> from scipy import signal
>>> from scipy import misc
>>> ascent = misc.ascent()
>>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
... [-10+0j, 0+ 0j, +10 +0j],
... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
>>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))
>>> ax_orig.imshow(ascent, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_mag.imshow(np.absolute(grad), cmap='gray')
>>> ax_mag.set_title('Gradient magnitude')
>>> ax_mag.set_axis_off()
>>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
>>> ax_ang.set_title('Gradient orientation')
>>> ax_ang.set_axis_off()
>>> fig.show()
"""
in1 = np.asarray(in1)
in2 = np.asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('convolve2d inputs must both be 2-D arrays')
if _inputs_swap_needed(mode, in1.shape, in2.shape):
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
out = _sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Notes
-----
When using "same" mode with even-length inputs, the outputs of `correlate`
and `correlate2d` differ: There is a 1-index offset between them.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> from scipy import signal
>>> from scipy import misc
>>> rng = np.random.default_rng()
>>> face = misc.face(gray=True) - misc.face(gray=True).mean()
>>> template = np.copy(face[300:365, 670:750]) # right eye
>>> template -= template.mean()
>>> face = face + rng.standard_normal(face.shape) * 50 # add noise
>>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
>>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(template, cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(corr, cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(x, y, 'ro')
>>> fig.show()
"""
in1 = np.asarray(in1)
in2 = np.asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('correlate2d inputs must both be 2-D arrays')
swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape)
if swapped_inputs:
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
out = _sigtools._convolve2d(in1, in2.conj(), 0, val, bval, fillvalue)
if swapped_inputs:
out = out[::-1, ::-1]
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd). The array is zero-padded
automatically.
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
See also
--------
scipy.ndimage.median_filter
Notes
-----
    This is faster than `medfilt` when the input dtype is ``uint8``,
    ``float32``, or ``float64``; for other types, this falls back to
    `medfilt`, in which case `scipy.ndimage.median_filter` is usually the
    faster choice. Even for the supported dtypes,
    `scipy.ndimage.median_filter` may be faster in some situations.
"""
image = np.asarray(input)
# checking dtype.type, rather than just dtype, is necessary for
# excluding np.longdouble with MS Visual C.
if image.dtype.type not in (np.ubyte, np.single, np.double):
return medfilt(image, kernel_size)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = np.asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return _sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
The function `sosfilt` (and filter design using ``output='sos'``) should be
preferred over `lfilter` for most filtering tasks, as second-order sections
have fewer numerical problems.
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a), len(b)) - 1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
lfiltic : Construct initial conditions for `lfilter`.
lfilter_zi : Compute initial state (steady state of step response) for
`lfilter`.
filtfilt : A forward-backward filter, to obtain a filter with linear phase.
savgol_filter : A Savitzky-Golay filter.
sosfilt: Filter data using cascaded second-order sections.
sosfiltfilt: A forward-backward filter using second-order sections.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]
- a[1]*y[n-1] - ... - a[N]*y[n-N]
where `M` is the degree of the numerator, `N` is the degree of the
denominator, and `n` is the sample number. It is implemented using
the following difference equations (assuming M = N)::
a[0]*y[n] = b[0] * x[n] + d[0][n-1]
d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1]
d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1]
...
d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1]
d[N-1][n] = b[N] * x[n] - a[N] * y[n]
where `d` are the state variables.
The rational transfer function describing this filter in the
z-transform domain is::
-1 -M
b[0] + b[1]z + ... + b[M] z
Y(z) = -------------------------------- X(z)
-1 -N
a[0] + a[1]z + ... + a[N] z
Examples
--------
Generate a noisy signal to be filtered:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
>>> t = np.linspace(-1, 1, 201)
>>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) +
... 0.1*np.sin(2*np.pi*1.25*t + 1) +
... 0.18*np.cos(2*np.pi*3.85*t))
>>> xn = x + rng.standard_normal(len(t)) * 0.08
    Create an order 3 lowpass Butterworth filter:
>>> b, a = signal.butter(3, 0.05)
Apply the filter to xn. Use lfilter_zi to choose the initial condition of
the filter:
>>> zi = signal.lfilter_zi(b, a)
>>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])
Apply the filter again, to have a result filtered at an order the same as
filtfilt:
>>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])
Use filtfilt to apply the filter:
>>> y = signal.filtfilt(b, a, xn)
Plot the original signal and the various filtered versions:
    >>> plt.figure()
>>> plt.plot(t, xn, 'b', alpha=0.75)
>>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k')
>>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice',
... 'filtfilt'), loc='best')
>>> plt.grid(True)
>>> plt.show()
"""
a = np.atleast_1d(a)
if len(a) == 1:
# This path only supports types fdgFDGO to mirror _linear_filter below.
# Any of b, a, x, or zi can set the dtype, but there is no default
# casting of other types; instead a NotImplementedError is raised.
b = np.asarray(b)
a = np.asarray(a)
if b.ndim != 1 and a.ndim != 1:
raise ValueError('object of too small depth for desired array')
x = _validate_x(x)
inputs = [b, a, x]
if zi is not None:
# _linear_filter does not broadcast zi, but does do expansion of
# singleton dims.
zi = np.asarray(zi)
if zi.ndim != x.ndim:
raise ValueError('object of too small depth for desired array')
expected_shape = list(x.shape)
expected_shape[axis] = b.shape[0] - 1
expected_shape = tuple(expected_shape)
# check the trivial case where zi is the right shape first
if zi.shape != expected_shape:
strides = zi.ndim * [None]
if axis < 0:
axis += zi.ndim
for k in range(zi.ndim):
if k == axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == 1:
strides[k] = 0
else:
raise ValueError('Unexpected shape for zi: expected '
'%s, found %s.' %
(expected_shape, zi.shape))
zi = np.lib.stride_tricks.as_strided(zi, expected_shape,
strides)
inputs.append(zi)
dtype = np.result_type(*inputs)
if dtype.char not in 'fdgFDGO':
raise NotImplementedError("input type '%s' not supported" % dtype)
b = np.array(b, dtype=dtype)
a = np.array(a, dtype=dtype, copy=False)
b /= a[0]
x = np.array(x, dtype=dtype, copy=False)
out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
ind = out_full.ndim * [slice(None)]
if zi is not None:
ind[axis] = slice(zi.shape[axis])
out_full[tuple(ind)] += zi
ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
out = out_full[tuple(ind)]
if zi is None:
return out
else:
ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
zf = out_full[tuple(ind)]
return out, zf
else:
if zi is None:
return _sigtools._linear_filter(b, a, x, axis)
else:
return _sigtools._linear_filter(b, a, x, axis, zi)
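# Worked sketch of the difference equation in the Notes above, assuming
# ``a[0] == 1`` for brevity: a naive loop over
#     y[n] = b[0]*x[n] + ... + b[M]*x[n-M] - a[1]*y[n-1] - ... - a[N]*y[n-N]
# should reproduce `lfilter` up to floating-point rounding. Helper name and
# coefficients are illustrative assumptions.
def _demo_lfilter_difference_equation(seed=0):
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(64)
    b = np.array([0.2, 0.3, 0.2])
    a = np.array([1.0, -0.5, 0.25])
    y_naive = np.zeros_like(x)
    for n in range(len(x)):
        acc = 0.0
        for m in range(len(b)):
            if n - m >= 0:
                acc += b[m] * x[n - m]
        for m in range(1, len(a)):
            if n - m >= 0:
                acc -= a[m] * y_naive[n - m]
        y_naive[n] = acc
    return np.allclose(y_naive, lfilter(b, a, x))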
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter given input and output vectors.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``,
where ``K = max(M, N)``.
See Also
--------
lfilter, lfilter_zi
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = np.asarray(y)
if x is None:
result_type = np.result_type(np.asarray(b), np.asarray(a), y)
if result_type.kind in 'bui':
result_type = np.float64
x = np.zeros(M, dtype=result_type)
else:
x = np.asarray(x)
result_type = np.result_type(np.asarray(b), np.asarray(a), y, x)
if result_type.kind in 'bui':
result_type = np.float64
x = x.astype(result_type)
L = np.size(x)
if L < M:
x = np.r_[x, np.zeros(M - L)]
y = y.astype(result_type)
zi = np.zeros(K, result_type)
L = np.size(y)
if L < N:
y = np.r_[y, np.zeros(N - L)]
for m in range(M):
zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0)
return zi
def deconvolve(signal, divisor):
"""Deconvolves ``divisor`` out of ``signal`` using inverse filtering.
Returns the quotient and remainder such that
``signal = convolve(divisor, quotient) + remainder``
Parameters
----------
signal : array_like
Signal data, typically a recorded signal
divisor : array_like
Divisor data, typically an impulse response or filter that was
applied to the original signal
Returns
-------
quotient : ndarray
Quotient, typically the recovered original signal
remainder : ndarray
Remainder
Examples
--------
Deconvolve a signal that's been filtered:
>>> from scipy import signal
>>> original = [0, 1, 0, 0, 1, 1, 0, 0]
>>> impulse_response = [2, 1]
>>> recorded = signal.convolve(impulse_response, original)
>>> recorded
array([0, 2, 1, 0, 2, 3, 1, 0, 0])
>>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
>>> recovered
array([ 0., 1., 0., 0., 1., 1., 0., 0.])
See Also
--------
numpy.polydiv : performs polynomial division (same operation, but
also accepts poly1d objects)
"""
num = np.atleast_1d(signal)
den = np.atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = np.zeros(N - D + 1, float)
input[0] = 1
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
Examples
--------
In this example we use the Hilbert transform to determine the amplitude
envelope and instantaneous frequency of an amplitude-modulated signal.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import hilbert, chirp
>>> duration = 1.0
>>> fs = 400.0
>>> samples = int(fs*duration)
>>> t = np.arange(samples) / fs
    We create a chirp whose frequency increases from 20 Hz to 100 Hz and
apply an amplitude modulation.
>>> signal = chirp(t, 20.0, t[-1], 100.0)
>>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
    The amplitude envelope is given by the magnitude of the analytic signal.
    The instantaneous frequency can be obtained by differentiating the
    instantaneous phase with respect to time. The instantaneous phase
    corresponds to the phase angle of the analytic signal.
>>> analytic_signal = hilbert(signal)
>>> amplitude_envelope = np.abs(analytic_signal)
>>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
>>> instantaneous_frequency = (np.diff(instantaneous_phase) /
... (2.0*np.pi) * fs)
>>> fig, (ax0, ax1) = plt.subplots(nrows=2)
>>> ax0.plot(t, signal, label='signal')
>>> ax0.plot(t, amplitude_envelope, label='envelope')
>>> ax0.set_xlabel("time in seconds")
>>> ax0.legend()
>>> ax1.plot(t[1:], instantaneous_frequency)
>>> ax1.set_xlabel("time in seconds")
>>> ax1.set_ylim(0.0, 120.0)
>>> fig.tight_layout()
References
----------
.. [1] Wikipedia, "Analytic signal".
https://en.wikipedia.org/wiki/Analytic_signal
.. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
.. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal
Processing, Third Edition, 2009. Chapter 12.
ISBN 13: 978-1292-02572-8
"""
x = np.asarray(x)
if np.iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = sp_fft.fft(x, N, axis=axis)
h = np.zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if x.ndim > 1:
ind = [np.newaxis] * x.ndim
ind[axis] = slice(None)
h = h[tuple(ind)]
x = sp_fft.ifft(Xf * h, axis=axis)
return x
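# Sketch of the envelope property described above (illustrative): for a cosine
# with an integer number of cycles, the analytic signal is a complex
# exponential, so its magnitude is a flat amplitude envelope.
def _demo_hilbert_envelope():
    n = np.arange(256)
    x = 0.7 * np.cos(2 * np.pi * 8 * n / 256)   # 8 full cycles, amplitude 0.7
    envelope = np.abs(hilbert(x))
    return np.allclose(envelope, 0.7)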
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
https://en.wikipedia.org/wiki/Analytic_signal
"""
x = np.atleast_2d(x)
if x.ndim > 2:
raise ValueError("x must be 2-D.")
if np.iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = sp_fft.fft2(x, N, axes=(0, 1))
h1 = np.zeros(N[0], 'd')
h2 = np.zeros(N[1], 'd')
    for h, N1 in zip((h1, h2), N):
        if N1 % 2 == 0:
            h[0] = h[N1 // 2] = 1
            h[1:N1 // 2] = 2
        else:
            h[0] = 1
            h[1:(N1 + 1) // 2] = 2
h = h1[:, np.newaxis] * h2[np.newaxis, :]
k = x.ndim
while k > 2:
h = h[:, np.newaxis]
k -= 1
x = sp_fft.ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"""Sort roots based on magnitude.
Parameters
----------
p : array_like
The roots to sort, as a 1-D array.
Returns
-------
p_sorted : ndarray
Sorted roots.
indx : ndarray
Array of indices needed to sort the input `p`.
Examples
--------
>>> from scipy import signal
>>> vals = [1, 4, 1+1.j, 3]
>>> p_sorted, indx = signal.cmplx_sort(vals)
>>> p_sorted
array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j])
>>> indx
array([0, 2, 3, 1])
"""
p = np.asarray(p)
indx = np.argsort(abs(p))
return np.take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal in terms of
the distance between them. Default is 1e-3. Refer to Notes about
the details on roots grouping.
rtype : {'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max', 'maximum': pick the maximum of those roots
- 'min', 'minimum': pick the minimum of those roots
- 'avg', 'mean': take the average of those roots
When finding minimum or maximum among complex roots they are compared
first by the real part and then by the imaginary part.
Returns
-------
unique : ndarray
The list of unique roots.
multiplicity : ndarray
The multiplicity of each root.
Notes
-----
If we have 3 roots ``a``, ``b`` and ``c``, such that ``a`` is close to
``b`` and ``b`` is close to ``c`` (distance is less than `tol`), then it
doesn't necessarily mean that ``a`` is close to ``c``. It means that roots
grouping is not unique. In this function we use "greedy" grouping going
through the roots in the order they are given in the input `p`.
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
reduce = np.max
elif rtype in ['min', 'minimum']:
reduce = np.min
elif rtype in ['avg', 'mean']:
reduce = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = np.asarray(p)
points = np.empty((len(p), 2))
points[:, 0] = np.real(p)
points[:, 1] = np.imag(p)
tree = cKDTree(points)
p_unique = []
p_multiplicity = []
used = np.zeros(len(p), dtype=bool)
for i in range(len(p)):
if used[i]:
continue
group = tree.query_ball_point(points[i], tol)
group = [x for x in group if not used[x]]
p_unique.append(reduce(p[group]))
p_multiplicity.append(len(group))
used[group] = True
return np.asarray(p_unique), np.asarray(p_multiplicity)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""Compute b(s) and a(s) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
H(s) = ------ = ------------------------------------------
a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `invresz`.
Parameters
----------
r : array_like
        Residues corresponding to the poles. For repeated poles, the residues
        must be ordered to correspond to the fractions in ascending powers.
p : array_like
Poles. Equal poles must be adjacent.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal in terms of
the distance between them. Default is 1e-3. See `unique_roots`
for further details.
rtype : {'avg', 'min', 'max'}, optional
Method for computing a root to represent a group of identical roots.
Default is 'avg'. See `unique_roots` for further details.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residue, invresz, unique_roots
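    Examples
    --------
    A minimal round-trip sketch (illustrative values only): recover the
    polynomial coefficients from the expansion produced by `residue`.
    >>> from scipy.signal import residue, invres
    >>> r, p, k = residue([1, 3], [1, 3, 2])
    >>> b, a = invres(r, p, k)
    >>> np.allclose(b, [1, 3]), np.allclose(a, [1, 3, 2])
    (True, True)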
"""
r = np.atleast_1d(r)
p = np.atleast_1d(p)
k = np.trim_zeros(np.atleast_1d(k), 'f')
unique_poles, multiplicity = _group_poles(p, tol, rtype)
factors, denominator = _compute_factors(unique_poles, multiplicity,
include_powers=True)
if len(k) == 0:
numerator = 0
else:
numerator = np.polymul(k, denominator)
for residue, factor in zip(r, factors):
numerator = np.polyadd(numerator, residue * factor)
return numerator, denominator
def _compute_factors(roots, multiplicity, include_powers=False):
"""Compute the total polynomial divided by factors for each root."""
current = np.array([1])
suffixes = [current]
for pole, mult in zip(roots[-1:0:-1], multiplicity[-1:0:-1]):
monomial = np.array([1, -pole])
for _ in range(mult):
current = np.polymul(current, monomial)
suffixes.append(current)
suffixes = suffixes[::-1]
factors = []
current = np.array([1])
for pole, mult, suffix in zip(roots, multiplicity, suffixes):
monomial = np.array([1, -pole])
block = []
for i in range(mult):
if i == 0 or include_powers:
block.append(np.polymul(current, suffix))
current = np.polymul(current, monomial)
factors.extend(reversed(block))
return factors, current
def _compute_residues(poles, multiplicity, numerator):
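    # Residues of numerator(s) / prod_i (s - p_i)**m_i at each pole.  Simple
    # poles are evaluated directly; repeated poles use polynomial deflation
    # (repeated division by (s - p_i)), as in the reference cited in
    # `residue`.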
denominator_factors, _ = _compute_factors(poles, multiplicity)
numerator = numerator.astype(poles.dtype)
residues = []
for pole, mult, factor in zip(poles, multiplicity,
denominator_factors):
if mult == 1:
residues.append(np.polyval(numerator, pole) /
np.polyval(factor, pole))
else:
numer = numerator.copy()
monomial = np.array([1, -pole])
factor, d = np.polydiv(factor, monomial)
block = []
for _ in range(mult):
numer, n = np.polydiv(numer, monomial)
r = n[0] / d[0]
numer = np.polysub(numer, r * factor)
block.append(r)
residues.extend(reversed(block))
return np.asarray(residues)
def residue(b, a, tol=1e-3, rtype='avg'):
"""Compute partial-fraction expansion of b(s) / a(s).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
H(s) = ------ = ------------------------------------------
a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `residuez`.
See Notes for details about the algorithm.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
tol : float, optional
The tolerance for two roots to be considered equal in terms of
the distance between them. Default is 1e-3. See `unique_roots`
for further details.
rtype : {'avg', 'min', 'max'}, optional
Method for computing a root to represent a group of identical roots.
Default is 'avg'. See `unique_roots` for further details.
Returns
-------
r : ndarray
        Residues corresponding to the poles. For repeated poles, the residues
        are ordered to correspond to the fractions in ascending powers.
p : ndarray
Poles ordered by magnitude in ascending order.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, residuez, numpy.poly, unique_roots
Notes
-----
The "deflation through subtraction" algorithm is used for
computations --- method 6 in [1]_.
The form of partial fraction expansion depends on poles multiplicity in
the exact mathematical sense. However there is no way to exactly
determine multiplicity of roots of a polynomial in numerical computing.
Thus you should think of the result of `residue` with given `tol` as
partial fraction expansion computed for the denominator composed of the
computed poles with empirically determined multiplicity. The choice of
`tol` can drastically change the result if there are close poles.
References
----------
.. [1] J. F. Mahoney, B. D. Sivazlian, "Partial fractions expansion: a
review of computational methodology and efficiency", Journal of
Computational and Applied Mathematics, Vol. 9, 1983.
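    Examples
    --------
    An illustrative expansion (a sketch with simple values) of
    ``H(s) = (s + 3) / (s**2 + 3*s + 2) = 2/(s + 1) - 1/(s + 2)``:
    >>> from scipy import signal
    >>> r, p, k = signal.residue([1, 3], [1, 3, 2])
    >>> np.allclose(r, [2, -1]), np.allclose(p, [-1, -2]), k.size
    (True, True, 0)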
"""
b = np.asarray(b)
a = np.asarray(a)
if (np.issubdtype(b.dtype, np.complexfloating)
or np.issubdtype(a.dtype, np.complexfloating)):
b = b.astype(complex)
a = a.astype(complex)
else:
b = b.astype(float)
a = a.astype(float)
b = np.trim_zeros(np.atleast_1d(b), 'f')
a = np.trim_zeros(np.atleast_1d(a), 'f')
if a.size == 0:
raise ValueError("Denominator `a` is zero.")
poles = np.roots(a)
if b.size == 0:
return np.zeros(poles.shape), cmplx_sort(poles)[0], np.array([])
if len(b) < len(a):
k = np.empty(0)
else:
k, b = np.polydiv(b, a)
unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype)
unique_poles, order = cmplx_sort(unique_poles)
multiplicity = multiplicity[order]
residues = _compute_residues(unique_poles, multiplicity, b)
index = 0
for pole, mult in zip(unique_poles, multiplicity):
poles[index:index + mult] = pole
index += mult
return residues / a[0], poles, k
def residuez(b, a, tol=1e-3, rtype='avg'):
"""Compute partial-fraction expansion of b(z) / a(z).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
H(z) = ------ = ------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `residue`.
See Notes of `residue` for details about the algorithm.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
tol : float, optional
The tolerance for two roots to be considered equal in terms of
the distance between them. Default is 1e-3. See `unique_roots`
for further details.
rtype : {'avg', 'min', 'max'}, optional
Method for computing a root to represent a group of identical roots.
Default is 'avg'. See `unique_roots` for further details.
Returns
-------
r : ndarray
        Residues corresponding to the poles. For repeated poles, the residues
        are ordered to correspond to the fractions in ascending powers.
p : ndarray
Poles ordered by magnitude in ascending order.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invresz, residue, unique_roots
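    Examples
    --------
    A one-pole sketch (illustrative values): ``H(z) = 1 / (1 - 0.5*z**-1)``
    expands to a single term with residue 1 at the pole 0.5.
    >>> from scipy import signal
    >>> r, p, k = signal.residuez([1.0], [1.0, -0.5])
    >>> np.allclose(r, [1.0]), np.allclose(p, [0.5]), k.size
    (True, True, 0)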
"""
b = np.asarray(b)
a = np.asarray(a)
if (np.issubdtype(b.dtype, np.complexfloating)
or np.issubdtype(a.dtype, np.complexfloating)):
b = b.astype(complex)
a = a.astype(complex)
else:
b = b.astype(float)
a = a.astype(float)
b = np.trim_zeros(np.atleast_1d(b), 'b')
a = np.trim_zeros(np.atleast_1d(a), 'b')
if a.size == 0:
raise ValueError("Denominator `a` is zero.")
elif a[0] == 0:
raise ValueError("First coefficient of determinant `a` must be "
"non-zero.")
poles = np.roots(a)
if b.size == 0:
return np.zeros(poles.shape), cmplx_sort(poles)[0], np.array([])
b_rev = b[::-1]
a_rev = a[::-1]
if len(b_rev) < len(a_rev):
k_rev = np.empty(0)
else:
k_rev, b_rev = np.polydiv(b_rev, a_rev)
unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype)
unique_poles, order = cmplx_sort(unique_poles)
multiplicity = multiplicity[order]
residues = _compute_residues(1 / unique_poles, multiplicity, b_rev)
index = 0
powers = np.empty(len(residues), dtype=int)
for pole, mult in zip(unique_poles, multiplicity):
poles[index:index + mult] = pole
powers[index:index + mult] = 1 + np.arange(mult)
index += mult
residues *= (-poles) ** powers / a_rev[0]
return residues, poles, k_rev[::-1]
def _group_poles(poles, tol, rtype):
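    # Greedy grouping of equal poles (assumed to be adjacent, as required by
    # `invres`/`invresz`): consecutive poles within `tol` form one group,
    # which is reduced to a single representative according to `rtype`.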
if rtype in ['max', 'maximum']:
reduce = np.max
elif rtype in ['min', 'minimum']:
reduce = np.min
elif rtype in ['avg', 'mean']:
reduce = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
unique = []
multiplicity = []
pole = poles[0]
block = [pole]
for i in range(1, len(poles)):
if abs(poles[i] - pole) <= tol:
block.append(pole)
else:
unique.append(reduce(block))
multiplicity.append(len(block))
pole = poles[i]
block = [pole]
unique.append(reduce(block))
multiplicity.append(len(block))
return np.asarray(unique), np.asarray(multiplicity)
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""Compute b(z) and a(z) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
H(z) = ------ = ------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `invres`.
Parameters
----------
r : array_like
        Residues corresponding to the poles. For repeated poles, the residues
        must be ordered to correspond to the fractions in ascending powers.
p : array_like
Poles. Equal poles must be adjacent.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal in terms of
the distance between them. Default is 1e-3. See `unique_roots`
for further details.
rtype : {'avg', 'min', 'max'}, optional
Method for computing a root to represent a group of identical roots.
Default is 'avg'. See `unique_roots` for further details.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residuez, unique_roots, invres
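    Examples
    --------
    A minimal round-trip sketch (illustrative values only), inverting the
    one-pole expansion from the `residuez` example:
    >>> from scipy.signal import invresz
    >>> b, a = invresz([1.0], [0.5], [])
    >>> np.allclose(b, [1.0]), np.allclose(a, [1.0, -0.5])
    (True, True)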
"""
r = np.atleast_1d(r)
p = np.atleast_1d(p)
k = np.trim_zeros(np.atleast_1d(k), 'b')
unique_poles, multiplicity = _group_poles(p, tol, rtype)
factors, denominator = _compute_factors(unique_poles, multiplicity,
include_powers=True)
if len(k) == 0:
numerator = 0
else:
numerator = np.polymul(k[::-1], denominator[::-1])
for residue, factor in zip(r, factors):
numerator = np.polyadd(numerator, residue * factor[::-1])
return numerator[::-1], denominator
def resample(x, num, t=None, axis=0, window=None, domain='time'):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the equally spaced sample
positions associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
domain : string, optional
A string indicating the domain of the input `x`:
``time`` Consider the input `x` as time-domain (Default),
``freq`` Consider the input `x` as frequency-domain.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The argument `window` controls a Fourier-domain window that tapers
the Fourier spectrum before zero-padding to alleviate ringing in
the resampled values for sampled signals you didn't intend to be
interpreted as band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * len(x) / num``.
If `t` is not None, then it is used solely to calculate the resampled
    positions `resampled_t`.
As noted, `resample` uses FFT transformations, which can be very
slow if the number of input or output samples is large and prime;
see `scipy.fft.fft`.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f = signal.resample(y, 100)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
>>> plt.legend(['data', 'resampled'], loc='best')
>>> plt.show()
"""
if domain not in ('time', 'freq'):
raise ValueError("Acceptable domain flags are 'time' or"
" 'freq', not domain={}".format(domain))
x = np.asarray(x)
Nx = x.shape[axis]
# Check if we can use faster real FFT
real_input = np.isrealobj(x)
if domain == 'time':
# Forward transform
if real_input:
X = sp_fft.rfft(x, axis=axis)
else: # Full complex FFT
X = sp_fft.fft(x, axis=axis)
else: # domain == 'freq'
X = x
# Apply window to spectrum
if window is not None:
if callable(window):
W = window(sp_fft.fftfreq(Nx))
elif isinstance(window, np.ndarray):
if window.shape != (Nx,):
raise ValueError('window must have the same length as data')
W = window
else:
W = sp_fft.ifftshift(get_window(window, Nx))
newshape_W = [1] * x.ndim
newshape_W[axis] = X.shape[axis]
if real_input:
# Fold the window back on itself to mimic complex behavior
W_real = W.copy()
W_real[1:] += W_real[-1:0:-1]
W_real[1:] *= 0.5
X *= W_real[:newshape_W[axis]].reshape(newshape_W)
else:
X *= W.reshape(newshape_W)
# Copy each half of the original spectrum to the output spectrum, either
    # truncating high frequencies (downsampling) or zero-padding them
# (upsampling)
# Placeholder array for output spectrum
newshape = list(x.shape)
if real_input:
newshape[axis] = num // 2 + 1
else:
newshape[axis] = num
Y = np.zeros(newshape, X.dtype)
# Copy positive frequency components (and Nyquist, if present)
N = min(num, Nx)
nyq = N // 2 + 1 # Slice index that includes Nyquist if present
sl = [slice(None)] * x.ndim
sl[axis] = slice(0, nyq)
Y[tuple(sl)] = X[tuple(sl)]
if not real_input:
# Copy negative frequency components
if N > 2: # (slice expression doesn't collapse to empty array)
sl[axis] = slice(nyq - N, None)
Y[tuple(sl)] = X[tuple(sl)]
# Split/join Nyquist component(s) if present
# So far we have set Y[+N/2]=X[+N/2]
if N % 2 == 0:
if num < Nx: # downsampling
if real_input:
sl[axis] = slice(N//2, N//2 + 1)
Y[tuple(sl)] *= 2.
else:
# select the component of Y at frequency +N/2,
# add the component of X at -N/2
sl[axis] = slice(-N//2, -N//2 + 1)
Y[tuple(sl)] += X[tuple(sl)]
elif Nx < num: # upsampling
# select the component at frequency +N/2 and halve it
sl[axis] = slice(N//2, N//2 + 1)
Y[tuple(sl)] *= 0.5
if not real_input:
temp = Y[tuple(sl)]
# set the component at -N/2 equal to the component at +N/2
sl[axis] = slice(num-N//2, num-N//2 + 1)
Y[tuple(sl)] = temp
# Inverse transform
if real_input:
y = sp_fft.irfft(Y, num, axis=axis)
else:
y = sp_fft.ifft(Y, axis=axis, overwrite_x=True)
y *= (float(num) / float(Nx))
if t is None:
return y
else:
new_t = np.arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0),
padtype='constant', cval=None):
"""
Resample `x` along the given axis using polyphase filtering.
The signal `x` is upsampled by the factor `up`, a zero-phase low-pass
FIR filter is applied, and then it is downsampled by the factor `down`.
The resulting sample rate is ``up / down`` times the original sample
rate. By default, values beyond the boundary of the signal are assumed
to be zero during the filtering step.
Parameters
----------
x : array_like
The data to be resampled.
up : int
The upsampling factor.
down : int
The downsampling factor.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : string, tuple, or array_like, optional
Desired window to use to design the low-pass filter, or the FIR filter
coefficients to employ. See below for details.
padtype : string, optional
`constant`, `line`, `mean`, `median`, `maximum`, `minimum` or any of
the other signal extension modes supported by `scipy.signal.upfirdn`.
Changes assumptions on values beyond the boundary. If `constant`,
assumed to be `cval` (default zero). If `line` assumed to continue a
linear trend defined by the first and last points. `mean`, `median`,
`maximum` and `minimum` work as in `np.pad` and assume that the values
beyond the boundary are the mean, median, maximum or minimum
respectively of the array along the axis.
.. versionadded:: 1.4.0
cval : float, optional
Value to use if `padtype='constant'`. Default is zero.
.. versionadded:: 1.4.0
Returns
-------
resampled_x : array
The resampled array.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample : Resample up or down using the FFT method.
Notes
-----
This polyphase method will likely be faster than the Fourier method
in `scipy.signal.resample` when the number of samples is large and
prime, or when the number of samples is large and `up` and `down`
    share a large greatest common divisor. The length of the FIR
filter used will depend on ``max(up, down) // gcd(up, down)``, and
the number of operations during polyphase filtering will depend on
the filter length and `down` (see `scipy.signal.upfirdn` for details).
The argument `window` specifies the FIR low-pass filter design.
If `window` is an array_like it is assumed to be the FIR filter
coefficients. Note that the FIR filter is applied after the upsampling
step, so it should be designed to operate on a signal at a sampling
frequency higher than the original by a factor of `up//gcd(up, down)`.
This function's output will be centered with respect to this array, so it
is best to pass a symmetric filter with an odd number of samples if, as
is usually the case, a zero-phase filter is desired.
For any other type of `window`, the functions `scipy.signal.get_window`
and `scipy.signal.firwin` are called to generate the appropriate filter
coefficients.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * down / float(up)``.
Examples
--------
By default, the end of the resampled data rises to meet the first
sample of the next cycle for the FFT method, and gets closer to zero
for the polyphase method:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f_fft = signal.resample(y, 100)
>>> f_poly = signal.resample_poly(y, 100, 20)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-')
>>> plt.plot(x, y, 'ko-')
>>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries
>>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best')
>>> plt.show()
This default behaviour can be changed by using the padtype option:
>>> import numpy as np
>>> from scipy import signal
>>> N = 5
>>> x = np.linspace(0, 1, N, endpoint=False)
>>> y = 2 + x**2 - 1.7*np.sin(x) + .2*np.cos(11*x)
>>> y2 = 1 + x**3 + 0.1*np.sin(x) + .1*np.cos(11*x)
>>> Y = np.stack([y, y2], axis=-1)
>>> up = 4
>>> xr = np.linspace(0, 1, N*up, endpoint=False)
>>> y2 = signal.resample_poly(Y, up, 1, padtype='constant')
>>> y3 = signal.resample_poly(Y, up, 1, padtype='mean')
>>> y4 = signal.resample_poly(Y, up, 1, padtype='line')
>>> import matplotlib.pyplot as plt
>>> for i in [0,1]:
... plt.figure()
... plt.plot(xr, y4[:,i], 'g.', label='line')
... plt.plot(xr, y3[:,i], 'y.', label='mean')
... plt.plot(xr, y2[:,i], 'r.', label='constant')
... plt.plot(x, Y[:,i], 'k-')
... plt.legend()
>>> plt.show()
"""
x = np.asarray(x)
if up != int(up):
raise ValueError("up must be an integer")
if down != int(down):
raise ValueError("down must be an integer")
up = int(up)
down = int(down)
if up < 1 or down < 1:
raise ValueError('up and down must be >= 1')
if cval is not None and padtype != 'constant':
        raise ValueError('cval has no effect when padtype is %r' % padtype)
# Determine our up and down factors
# Use a rational approximation to save computation time on really long
# signals
g_ = math.gcd(up, down)
up //= g_
down //= g_
if up == down == 1:
return x.copy()
n_in = x.shape[axis]
n_out = n_in * up
n_out = n_out // down + bool(n_out % down)
if isinstance(window, (list, np.ndarray)):
window = np.array(window) # use array to force a copy (we modify it)
if window.ndim > 1:
raise ValueError('window must be 1-D')
half_len = (window.size - 1) // 2
h = window
else:
# Design a linear-phase low-pass FIR filter
max_rate = max(up, down)
f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)
half_len = 10 * max_rate # reasonable cutoff for sinc-like function
h = firwin(2 * half_len + 1, f_c,
window=window).astype(x.dtype) # match dtype of x
h *= up
# Zero-pad our filter to put the output samples at the center
n_pre_pad = (down - half_len % down)
n_post_pad = 0
n_pre_remove = (half_len + n_pre_pad) // down
# We should rarely need to do this given our filter lengths...
while _output_len(len(h) + n_pre_pad + n_post_pad, n_in,
up, down) < n_out + n_pre_remove:
n_post_pad += 1
h = np.concatenate((np.zeros(n_pre_pad, dtype=h.dtype), h,
np.zeros(n_post_pad, dtype=h.dtype)))
n_pre_remove_end = n_pre_remove + n_out
# Remove background depending on the padtype option
funcs = {'mean': np.mean, 'median': np.median,
'minimum': np.amin, 'maximum': np.amax}
upfirdn_kwargs = {'mode': 'constant', 'cval': 0}
if padtype in funcs:
background_values = funcs[padtype](x, axis=axis, keepdims=True)
elif padtype in _upfirdn_modes:
upfirdn_kwargs = {'mode': padtype}
if padtype == 'constant':
if cval is None:
cval = 0
upfirdn_kwargs['cval'] = cval
else:
raise ValueError(
'padtype must be one of: maximum, mean, median, minimum, ' +
', '.join(_upfirdn_modes))
if padtype in funcs:
x = x - background_values
# filter then remove excess
y = upfirdn(h, x, up, down, axis=axis, **upfirdn_kwargs)
keep = [slice(None), ]*x.ndim
keep[axis] = slice(n_pre_remove, n_pre_remove_end)
y_keep = y[tuple(keep)]
# Add background back
if padtype in funcs:
y_keep += background_values
return y_keep
def vectorstrength(events, period):
'''
Determine the vector strength of the events corresponding to the given
period.
The vector strength is a measure of phase synchrony, how well the
timing of the events is synchronized to a single period of a periodic
signal.
If multiple periods are used, calculate the vector strength of each.
This is called the "resonating vector strength".
Parameters
----------
events : 1D array_like
An array of time points containing the timing of the events.
period : float or array_like
The period of the signal that the events should synchronize to.
The period is in the same units as `events`. It can also be an array
of periods, in which case the outputs are arrays of the same length.
Returns
-------
strength : float or 1D array
The strength of the synchronization. 1.0 is perfect synchronization
and 0.0 is no synchronization. If `period` is an array, this is also
an array with each element containing the vector strength at the
corresponding period.
phase : float or array
The phase that the events are most strongly synchronized to in radians.
If `period` is an array, this is also an array with each element
containing the phase for the corresponding period.
References
----------
van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
strength: Auditory system, electric fish, and noise.
Chaos 21, 047508 (2011);
:doi:`10.1063/1.3670512`.
van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
biological and mathematical perspectives. Biol Cybern.
2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`.
van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
when we vary the "probing" frequency while keeping the spike times
fixed. Biol Cybern. 2013 Aug;107(4):491-94.
:doi:`10.1007/s00422-013-0560-8`.
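    Examples
    --------
    A small synthetic illustration: events that all occur at the same phase
    of the period give a vector strength of 1.
    >>> from scipy.signal import vectorstrength
    >>> events = np.array([0.0, 1.0, 2.0, 3.0])
    >>> strength, phase = vectorstrength(events, period=1.0)
    >>> round(float(strength), 6)
    1.0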
'''
events = np.asarray(events)
period = np.asarray(period)
    if events.ndim > 1:
        raise ValueError('events cannot have more than 1 dimension')
    if period.ndim > 1:
        raise ValueError('period cannot have more than 1 dimension')
# we need to know later if period was originally a scalar
scalarperiod = not period.ndim
events = np.atleast_2d(events)
period = np.atleast_2d(period)
if (period <= 0).any():
raise ValueError('periods must be positive')
# this converts the times to vectors
vectors = np.exp(np.dot(2j*np.pi/period.T, events))
# the vector strength is just the magnitude of the mean of the vectors
# the vector phase is the angle of the mean of the vectors
vectormean = np.mean(vectors, axis=1)
strength = abs(vectormean)
phase = np.angle(vectormean)
# if the original period was a scalar, return scalars
if scalarperiod:
strength = strength[0]
phase = phase[0]
return strength, phase
def detrend(data, axis=-1, type='linear', bp=0, overwrite_data=False):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`. This parameter
only has an effect when ``type == 'linear'``.
overwrite_data : bool, optional
        If True, perform in-place detrending and avoid a copy. Default is False.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> from numpy.random import default_rng
>>> rng = default_rng()
>>> npoints = 1000
>>> noise = rng.standard_normal(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max()
0.06 # random
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = np.asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - np.mean(data, axis, keepdims=True)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = np.sort(np.unique(np.r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = np.r_[axis, 0:axis, axis + 1:rnk]
newdata = np.reshape(np.transpose(data, tuple(newdims)),
(N, _prod(dshape) // N))
if not overwrite_data:
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = np.ones((Npts, 2), dtype)
A[:, 0] = np.cast[dtype](np.arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - np.dot(A, coef)
# Put data back in original shape.
tdshape = np.take(dshape, newdims, 0)
ret = np.reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = np.transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Construct initial conditions for lfilter for step response steady-state.
Compute an initial state `zi` for the `lfilter` function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
See Also
--------
lfilter, lfiltic, filtfilt
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be 1-D.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be 1-D.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1, dtype=np.result_type(a, b)) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def sosfilt_zi(sos):
"""
Construct initial conditions for sosfilt for step response steady-state.
Compute an initial state `zi` for the `sosfilt` function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
zi : ndarray
Initial conditions suitable for use with ``sosfilt``, shape
``(n_sections, 2)``.
See Also
--------
sosfilt, zpk2sos
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
Filter a rectangular pulse that begins at time 0, with and without
the use of the `zi` argument of `scipy.signal.sosfilt`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sos = signal.butter(9, 0.125, output='sos')
>>> zi = signal.sosfilt_zi(sos)
>>> x = (np.arange(250) < 100).astype(int)
>>> f1 = signal.sosfilt(sos, x)
>>> f2, zo = signal.sosfilt(sos, x, zi=zi)
>>> plt.plot(x, 'k--', label='x')
>>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
>>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
>>> plt.legend(loc='best')
>>> plt.show()
"""
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
if sos.dtype.kind in 'bui':
sos = sos.astype(np.float64)
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2), dtype=sos.dtype)
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
    .. [1] F. Gustafsson. Determining the initial states in forward-backward
       filtering. Transactions on Signal Processing, 46(4):988-992, 1996.
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
    if axis != -1 and axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
    if axis != -1 and axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
irlen=None):
"""
Apply a digital filter forward and backward to a signal.
This function applies a linear digital filter twice, once forward and
once backwards. The combined filter has zero phase and a filter order
twice that of the original.
The function provides options for handling the edges of the signal.
The function `sosfiltfilt` (and filter design using ``output='sos'``)
should be preferred over `filtfilt` for most filtering tasks, as
second-order sections have fewer numerical problems.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is ``3 * max(len(a), len(b))``.
method : str, optional
Determines the method for handling the edges of the signal, either
"pad" or "gust". When `method` is "pad", the signal is padded; the
type of padding is determined by `padtype` and `padlen`, and `irlen`
is ignored. When `method` is "gust", Gustafsson's method is used,
and `padtype` and `padlen` are ignored.
irlen : int or None, optional
When `method` is "gust", `irlen` specifies the length of the
impulse response of the filter. If `irlen` is None, no part
of the impulse response is ignored. For a long signal, specifying
`irlen` can significantly improve the performance of the filter.
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt
Notes
-----
When `method` is "pad", the function pads the data along the given axis
in one of three ways: odd, even or constant. The odd and even extensions
have the corresponding symmetry about the end point of the data. The
constant extension extends the data with the values at the end points. On
both the forward and backward passes, the initial condition of the
filter is found by using `lfilter_zi` and scaling it by the end point of
the extended data.
When `method` is "gust", Gustafsson's method [1]_ is used. Initial
conditions are chosen for the forward and backward passes so that the
forward-backward filter gives the same result as the backward-forward
filter.
    The option to use Gustafsson's method was added in scipy version 0.16.0.
References
----------
.. [1] F. Gustaffson, "Determining the initial states in forward-backward
filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992,
1996.
Examples
--------
The examples will use several functions from `scipy.signal`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist frequency, or 125 Hz, and apply it to ``x`` with `filtfilt`.
The result should be approximately ``xlow``, with no phase shift.
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
The following example demonstrates the option ``method="gust"``.
First, create a filter.
>>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
`sig` is a random input signal to be filtered.
>>> rng = np.random.default_rng()
>>> n = 60
>>> sig = rng.standard_normal(n)**3 + 3*rng.standard_normal(n).cumsum()
Apply `filtfilt` to `sig`, once using the Gustafsson method, and
once using padding, and plot the results for comparison.
>>> fgust = signal.filtfilt(b, a, sig, method="gust")
>>> fpad = signal.filtfilt(b, a, sig, padlen=50)
>>> plt.plot(sig, 'k-', label='input')
>>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
>>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
>>> plt.legend(loc='best')
>>> plt.show()
The `irlen` argument can be used to improve the performance
of Gustafsson's method.
Estimate the impulse response length of the filter.
>>> z, p, k = signal.tf2zpk(b, a)
>>> eps = 1e-9
>>> r = np.max(np.abs(p))
>>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
>>> approx_impulse_len
137
Apply the filter to a longer signal, with and without the `irlen`
argument. The difference between `y1` and `y2` is small. For long
signals, using `irlen` gives a significant performance improvement.
>>> x = rng.standard_normal(5000)
>>> y1 = signal.filtfilt(b, a, x, method='gust')
>>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
>>> print(np.max(np.abs(y1 - y2)))
1.80056858312e-10
"""
b = np.atleast_1d(b)
a = np.atleast_1d(a)
x = np.asarray(x)
if method not in ["pad", "gust"]:
raise ValueError("method must be 'pad' or 'gust'.")
if method == "gust":
y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
return y
# method == "pad"
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=max(len(a), len(b)))
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def _validate_pad(padtype, padlen, x, axis, ntaps):
"""Helper to validate padding for filtfilt"""
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be greater "
"than padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
return edge, ext
def _validate_x(x):
x = np.asarray(x)
if x.ndim == 0:
raise ValueError('x must be at least 1-D')
return x
def sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections.
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
.. versionadded:: 0.16.0
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
>>> x = signal.unit_impulse(700)
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = signal.sosfilt(sos, x)
>>> plt.plot(y_tf, 'r', label='TF')
>>> plt.plot(y_sos, 'k', label='SOS')
>>> plt.legend(loc='best')
>>> plt.show()
"""
x = _validate_x(x)
sos, n_sections = _validate_sos(sos)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
inputs = [sos, x]
if zi is not None:
inputs.append(np.asarray(zi))
dtype = np.result_type(*inputs)
if dtype.char not in 'fdgFDGO':
raise NotImplementedError("input type '%s' not supported" % dtype)
if zi is not None:
zi = np.array(zi, dtype) # make a copy so that we can operate in place
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r, got %r.' %
(axis, x.shape, n_sections, x_zi_shape, zi.shape))
return_zi = True
else:
zi = np.zeros(x_zi_shape, dtype=dtype)
return_zi = False
axis = axis % x.ndim # make positive
x = np.moveaxis(x, axis, -1)
zi = np.moveaxis(zi, [0, axis + 1], [-2, -1])
x_shape, zi_shape = x.shape, zi.shape
x = np.reshape(x, (-1, x.shape[-1]))
x = np.array(x, dtype, order='C') # make a copy, can modify in place
zi = np.ascontiguousarray(np.reshape(zi, (-1, n_sections, 2)))
sos = sos.astype(dtype, copy=False)
_sosfilt(sos, x, zi)
x.shape = x_shape
x = np.moveaxis(x, -1, axis)
if return_zi:
zi.shape = zi_shape
zi = np.moveaxis(zi, [-2, -1], [0, axis + 1])
out = (x, zi)
else:
out = x
return out
def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):
"""
A forward-backward digital filter using cascaded second-order sections.
See `filtfilt` for more complete information about this method.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is::
3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(),
(sos[:, 5] == 0).sum()))
The extra subtraction at the end attempts to compensate for poles
and zeros at the origin (e.g. for odd-order filters) to yield
equivalent estimates of `padlen` to those of `filtfilt` for
second-order section filters built with `scipy.signal` functions.
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
filtfilt, sosfilt, sosfilt_zi, sosfreqz
Notes
-----
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy.signal import sosfiltfilt, butter
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
Create an interesting signal to filter.
>>> n = 201
>>> t = np.linspace(0, 1, n)
>>> x = 1 + (t < 0.5) - 0.25*t**2 + 0.05*rng.standard_normal(n)
Create a lowpass Butterworth filter, and use it to filter `x`.
>>> sos = butter(4, 0.125, output='sos')
>>> y = sosfiltfilt(sos, x)
For comparison, apply an 8th order filter using `sosfilt`. The filter
is initialized using the mean of the first four values of `x`.
>>> from scipy.signal import sosfilt, sosfilt_zi
>>> sos8 = butter(8, 0.125, output='sos')
>>> zi = x[:4].mean() * sosfilt_zi(sos8)
>>> y2, zo = sosfilt(sos8, x, zi=zi)
Plot the results. Note that the phase of `y` matches the input, while
`y2` has a significant phase delay.
>>> plt.plot(t, x, alpha=0.5, label='x(t)')
>>> plt.plot(t, y, label='y(t)')
>>> plt.plot(t, y2, label='y2(t)')
>>> plt.legend(framealpha=1, shadow=True)
>>> plt.grid(alpha=0.25)
>>> plt.xlabel('t')
>>> plt.show()
"""
sos, n_sections = _validate_sos(sos)
x = _validate_x(x)
# `method` is "pad"...
ntaps = 2 * n_sections + 1
ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=ntaps)
# These steps follow the same form as filtfilt with modifications
zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...)
zi_shape = [1] * x.ndim
zi_shape[axis] = 2
zi.shape = [n_sections] + zi_shape
x_0 = axis_slice(ext, stop=1, axis=axis)
(y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0)
y_0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)
y = axis_reverse(y, axis=axis)
if edge > 0:
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True):
"""
Downsample the signal after applying an anti-aliasing filter.
    By default, an order 8 Chebyshev type I filter is used. If `ftype` is
    'fir', a Hamming-windowed FIR filter whose order is 20 times the
    downsampling factor is used instead.
Parameters
----------
x : array_like
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor. When using IIR downsampling, it is recommended
to call `decimate` multiple times for downsampling factors higher than
13.
n : int, optional
The order of the filter (1 less than the length for 'fir'). Defaults to
8 for 'iir' and 20 times the downsampling factor for 'fir'.
ftype : str {'iir', 'fir'} or ``dlti`` instance, optional
If 'iir' or 'fir', specifies the type of lowpass filter. If an instance
of an `dlti` object, uses that object to filter before downsampling.
axis : int, optional
The axis along which to decimate.
zero_phase : bool, optional
Prevent phase shift by filtering with `filtfilt` instead of `lfilter`
when using an IIR filter, and shifting the outputs back by the filter's
group delay when using an FIR filter. The default value of ``True`` is
recommended, since a phase shift is generally not desired.
.. versionadded:: 0.18.0
Returns
-------
y : ndarray
The down-sampled signal.
See Also
--------
resample : Resample up or down using the FFT method.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The ``zero_phase`` keyword was added in 0.18.0.
The possibility to use instances of ``dlti`` as ``ftype`` was added in
0.18.0.
Examples
--------
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
Define wave parameters.
>>> wave_duration = 3
>>> sample_rate = 100
>>> freq = 2
>>> q = 5
Calculate number of samples.
>>> samples = wave_duration*sample_rate
>>> samples_decimated = int(samples/q)
Create cosine wave.
>>> x = np.linspace(0, wave_duration, samples, endpoint=False)
>>> y = np.cos(x*np.pi*freq*2)
Decimate cosine wave.
>>> ydem = signal.decimate(y, q)
>>> xnew = np.linspace(0, wave_duration, samples_decimated, endpoint=False)
Plot original and decimated waves.
>>> plt.plot(x, y, '.-', xnew, ydem, 'o-')
>>> plt.xlabel('Time, Seconds')
>>> plt.legend(['data', 'decimated'], loc='best')
>>> plt.show()
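For large factors it is recommended to decimate in stages; a sketch (the split
into two factors of 5 is illustrative, any factorization of the total factor works):
>>> ydem2 = signal.decimate(signal.decimate(y, 5), 5) # overall factor of 25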
"""
x = np.asarray(x)
q = operator.index(q)
if n is not None:
n = operator.index(n)
if ftype == 'fir':
if n is None:
half_len = 10 * q # reasonable cutoff for our sinc-like function
n = 2 * half_len
b, a = firwin(n+1, 1. / q, window='hamming'), 1.
elif ftype == 'iir':
if n is None:
n = 8
system = dlti(*cheby1(n, 0.05, 0.8 / q))
b, a = system.num, system.den
elif isinstance(ftype, dlti):
system = ftype._as_tf() # Avoids copying if already in TF form
b, a = system.num, system.den
else:
raise ValueError('invalid ftype')
result_type = x.dtype
if result_type.kind in 'bui':
result_type = np.float64
b = np.asarray(b, dtype=result_type)
a = np.asarray(a, dtype=result_type)
sl = [slice(None)] * x.ndim
a = np.asarray(a)
if a.size == 1: # FIR case
b = b / a
if zero_phase:
y = resample_poly(x, 1, q, axis=axis, window=b)
else:
# upfirdn is generally faster than lfilter by a factor equal to the
# downsampling factor, since it only calculates the needed outputs
n_out = x.shape[axis] // q + bool(x.shape[axis] % q)
y = upfirdn(b, x, up=1, down=q, axis=axis)
sl[axis] = slice(None, n_out, None)
else: # IIR case
if zero_phase:
y = filtfilt(b, a, x, axis=axis)
else:
y = lfilter(b, a, x, axis=axis)
sl[axis] = slice(None, None, q)
return y[tuple(sl)]
| matthew-brett/scipy | scipy/signal/_signaltools.py | Python | bsd-3-clause | 153,038 | ["Gaussian"] | a147e9d78527ac627bb4a2d3d474218c7507f6d9a08b023b78212d6e9936fd33 |
# IPython log file
# (assumed session state: the commands below rely on imports such as these already
# being loaded; the exact modules are inferred from usage, not recorded in the log)
import sys
import numpy as np
import matplotlib.pyplot as plt
from importlib import reload
from scipy import ndimage as ndi, sparse
from skimage import filters, morphology
x = np.array([[0.0, 1.5, 2.7],
[1.5, 0.0, 0.0],
[2.7, 0.0, 0.0]])
y = sparse.csr_matrix(x)
y
import networkx as nx
g = nx.from_scipy_sparse_matrix(y)
g[0][1]
get_ipython().run_line_magic('cd', '~/Dropbox/data1/drosophila-embryo/')
get_ipython().run_line_magic('ls', '')
from gala import imio
v = imio.read_h5_stack('embA_0.3um_Probabilities.h5')
np.prod(v[..., 0]) * 8
np.prod(v[..., 0].shape) * 8
np.prod(v[..., 0].shape) * 8 / 1.9
np.prod(v[..., 0].shape) * 8 / 1e9
np.prod(v[..., 0].shape) * 2 / 1e9
v.shape
smoothed_vm = filters.gaussian(v[..., 0], sigma=4)
h = plt.hist(smoothed_vm.ravel(), bins='auto');
from fast_histogram import histogram1d as hist
values = hist(smoothed_vm.ravel(), bins=255)
values = hist(smoothed_vm.ravel(), range=[0, 1], bins=255)
plt.plot(values)
np.max(smoothed_vm)
b = smoothed_vm > 0.5
get_ipython().run_line_magic('pwd', '')
sys.path.append('/Users/jni/projects/mpl-volume-viewer/')
import slice_view as sv
view = sv.SliceViewer(b)
np.bincount(ndi.label(v).ravel())
np.bincount(ndi.label(b).ravel())
np.bincount(ndi.label(b)[0].ravel())
b2 = morphology.remove_small_objects(b, 10000)
sys.path.append('/Users/jni/projects/skan')
sys.path.append('/Users/jni/projects/unfold-embryo')
import unfold
g, idxs, path = unfold.define_mesoderm_axis(b2, spacing=0.3)
reload(unfold)
g, idxs, path = unfold.define_mesoderm_axis(b2, spacing=0.3)
get_ipython().run_line_magic('debug', '')
reload(unfold)
g, idxs, path = unfold.define_mesoderm_axis(b2, spacing=0.3)
np.max(idxs)
np.shape(g)
np.shape(idxs)
idxs[0]
idxs[3396]
np.max(path)
bflat = np.max(b2, axis=0)
plt.imshow(bflat)
idxs_path = idxs[:, 1:3][path]
plt.plot(idxs_path[:, 1], idxs_path[:, 0])
len(path)
values = [4.9, 3.0, 99.1]
values = np.array(values)
values[[[0, 0, 1],
[2, 1, 1],
[2, 2, 1]]]
indices = np.array([[0, 0, 1], [2, 1, 1], [2, 2, 1]])
values[indices]
idxs_path_smoothed = ndi.gaussian_filter(idxs_path, sigma=(10, 0), mode='reflect')
plt.plot(idxs_path_smoothed[:, 1], idxs_path_smoothed[:, 0])
idxs_path_smoothed = ndi.gaussian_filter(idxs_path, sigma=(50, 0), mode='reflect')
plt.plot(idxs_path_smoothed[:, 1], idxs_path_smoothed[:, 0])
get_ipython().run_line_magic('pinfo', 'ndi.gaussian_filter')
idxs_path_smoothed = ndi.gaussian_filter(idxs_path, sigma=(50, 0), mode='nearest')
plt.plot(idxs_path[:, 1], idxs_path[:, 0])
plt.plot(idxs_path_smoothed[:, 1], idxs_path_smoothed[:, 0])
| jni/useful-histories | smoothing-paths.py | Python | bsd-3-clause | 2,475 | ["Gaussian"] | 38abc69765a57fc85e3aa00e3a79dd502f158bdfc3a13234095151f1bad7c135 |
##
# title: BreadInterface.Lifecycle
# by: Brian Kim
# description: the set of methods that define
# the stages of existence for a BreadInterface object
#
##
# an interface that defines the life cycle of a breadinterface component
#
class Lifecycle():
def __init__( self ):
pass
def start( self ):
pass
def update( self ):
pass
def clear( self ):
pass
def stop( self ):
pass
def cleanup( self ):
pass
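# Illustrative subclass sketch (names assumed, not part of BreadInterface):
# class StatusBar( Lifecycle ):
#     def start( self ):
#         print( 'status bar started' )
#
# bar = StatusBar()
# bar.start()  # the remaining hooks default to the no-ops defined above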
| briansan/BreadInterface | py/BreadInterface/Lifecycle.py | Python | bsd-2-clause | 450 | ["Brian"] | 40cc0fa20a72f98fb1b0c2e5d578fab52ac4b19b40f1d4bdabf5c0408c159425 |
import numpy as np
import os
import sys
import traceback
import pyneb as pn
#from scipy.linalg import solve as solve_sc
from numpy.linalg import solve as solve_np
try:
import cvxopt
cvxopt_ok = True
except:
cvxopt_ok = False
"""
from scipy.sparse.linalg import spsolve as solve_sp
try:
from numba import double
from numba.decorators import jit, autojit
except:
pass
try:
import rpy2.robjects.numpy2ri
from rpy2.robjects.packages import importr
rpy2.robjects.numpy2ri.activate()
base = importr('base')
rpy_ok = True
except:
rpy_ok = False
"""
# It seems we no longer need bs() to deal with strings between py2 and py3
if sys.version_info.major < 3:
def bs(x):
return x
else:
def bs(x):
if isinstance(x, bytes):
return x.decode(encoding='UTF-8')
elif isinstance(x, str):
return x.encode(encoding='UTF-8')
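# Illustrative behaviour (assumed, not from the PyNeb docs): on Python 3,
# bs(b'H1') -> 'H1' and bs('H1') -> b'H1'; on Python 2 the input is returned unchanged.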
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
def execution_path(filename):
return os.path.join(os.path.dirname(sys._getframe(1).f_code.co_filename), filename)
def _returnNone(*argv, **kwargs):
return None
def int_to_roman(input_):
"""
Convert an integer to Roman numerals.
Examples:
>>> int_to_roman(0)
Traceback (most recent call last):
ValueError: Argument must be between 1 and 3999
>>> int_to_roman(-1)
Traceback (most recent call last):
ValueError: Argument must be between 1 and 3999
>>> int_to_roman(1.5)
Traceback (most recent call last):
TypeError: expected integer, got <type 'float'>
>>> print int_to_roman(2000)
MM
>>> print int_to_roman(1999)
MCMXCIX
"""
if type(input_) != type(1):
#raise TypeError, "expected integer, got %s" % type(input_)
return None
if not 0 < input_ < 4000:
#raise ValueError, "Argument must be between 1 and 3999"
return None
ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
result = ""
for i in range(len(ints)):
count = int(input_ / ints[i])
result += nums[i] * count
input_ -= ints[i] * count
return result
def roman_to_int(input_):
"""
Convert a roman numeral to an integer.
>>> r = range(1, 4000)
>>> nums = [int_to_roman(i) for i in r]
>>> ints = [roman_to_int(n) for n in nums]
>>> print r == ints
1
>>> roman_to_int('VVVIV')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: VVVIV
>>> roman_to_int(1)
Traceback (most recent call last):
...
TypeError: expected string, got <type 'int'>
>>> roman_to_int('a')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: A
>>> roman_to_int('IL')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: IL
"""
if type(input_) != type(""):
#raise TypeError, "expected string, got %s" % type(input_)
return None
input_ = input_.upper()
nums = ['M', 'D', 'C', 'L', 'X', 'V', 'I']
ints = [1000, 500, 100, 50, 10, 5, 1]
places = []
for c in input_:
if not c in nums:
#raise ValueError, "input is not a valid roman numeral: %s" % input_
return None
for i in range(len(input_)):
c = input_[i]
value = ints[nums.index(c)]
# If the next place holds a larger number, this value is negative.
try:
nextvalue = ints[nums.index(input_[i + 1])]
if nextvalue > value:
value *= -1
except IndexError:
# there is no next place.
pass
places.append(value)
sum_ = 0
for n in places: sum_ += n
# Easiest test for validity...
if int_to_roman(sum_) == input_:
return sum_
else:
#raise ValueError, 'input is not a valid roman numeral: %s' % input_
return None
def parseAtom(atom):
'''
Parses an atom label into the element and spectrum parts
'''
iso = ''
elem = ''
spec = ''
cont = True
firstdigit = True
for l in atom:
if l.isalpha() and cont:
elem += l
firstdigit = False
elif l.isdigit():
if firstdigit:
iso += l
else:
spec += l
cont = False
return iso+str.capitalize(elem), spec
def parseAtom2(atom):
'''
Parses an atom label into the element and spectrum parts
'''
iso = ''
elem = ''
spec = ''
cont = True
firstdigit = True
for l in atom:
if l.isalpha() and cont:
elem += l
firstdigit = False
elif l.isdigit():
if firstdigit:
iso += l
else:
spec += l
cont = False
if atom[-1] == 'r':
rec = 'r'
else:
rec = ''
return iso+str.capitalize(elem), spec, rec
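# Illustrative calls (labels assumed, not from the PyNeb docs):
# parseAtom('O3')   -> ('O', '3')        # element 'O', spectrum '3'
# parseAtom2('O3r') -> ('O', '3', 'r')   # trailing 'r' flags a recombination dataset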
def strExtract(text, par1=None, par2=None):
"""
Extract a substring from text (first parameter)
If par1 is a string, the extraction starts after par1,
else if it is an integer, it starts at position par1.
If par2 is a string, extraction stops at par2,
else if par2 is an integer, extraction stops after par2 characters.
Examples:
strExtract('test123','e','1')
strExtract('test123','st',4)
"""
if np.size(text) == 1:
if type(par1) is int:
str1 = text[par1::]
elif type(par1) is str:
str1 = text.split(par1)[-1]
else:
str1 = text
if type(par2) is int:
str2 = str1[0:par2]
elif type(par2) is str:
str2 = str1.split(par2)[0]
else:
str2 = str1
return str2
else:
res = []
for subtext in text:
res1 = strExtract(subtext, par1=par1, par2=par2)
if res1 != '':
res.append(res1)
return res
def formatExceptionInfo(maxTBlevel=5):
cla, exc, trbk = sys.exc_info()
excName = cla.__name__
try:
excArgs = exc.__dict__["args"]
except KeyError:
excArgs = "<no args>"
excTb = traceback.format_tb(trbk, maxTBlevel)
return (excName, excArgs, excTb)
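# Illustrative usage (a sketch, not from the PyNeb docs):
# try:
#     1 / 0
# except ZeroDivisionError:
#     name, args, tb_lines = formatExceptionInfo()  # name == 'ZeroDivisionError'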
def multi_split(s, seps):
res = [s]
for sep in seps:
s, res = res, []
for seq in s:
res += seq.split(sep)
return res
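# Illustrative call (a sketch): multi_split('a,b;c d', [',', ';', ' ']) -> ['a', 'b', 'c', 'd']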
def cleanPypicFiles(files=None, all_=False, dir_=None):
"""
Method to clean the directory containing the pypics files.
Parameters:
- files list of files to be removed
- all_ Boolean; if set to True, all the files are deleted from the directory
- dir_ directory from where the files are removed.
If None (default), pn.config.pypic_path is used.
"""
if dir_ is None:
dir_ = pn.config.pypic_path
if all_:
files = os.listdir(dir_)
if np.ndim(files) == 0:
files = [files]
if files is None:
return
if type(files) == type(''):
files = [files]
for file_ in files:
file_path = os.path.join(dir_, file_)
try:
if os.path.isfile(file_path):
os.remove(file_path)
pn.log_.message('Deleting {0}'.format(file_path), calling='cleanPypicFiles')
except:
pn.log_.warn('Unable to remove {0}'.format(file_path), calling='cleanPypicFiles')
def getPypicFiles(dir_=None):
"""
Return the list of files in the directory.
Parameters:
- dir_ directory whose files are listed.
If None (default), pn.config.pypic_path is used.
"""
if dir_ is None:
dir_ = pn.config.pypic_path
files = os.listdir(dir_)
return files
def revert_seterr(oldsettings):
"""
This function reverts the seterr options to the values saved in oldsettings.
Usage:
oldsettings = np.seterr(all='ignore')
to_return = (result - int_ratio) / int_ratio # this will not issue Warning messages
revert_seterr(oldsettings)
Parameters:
oldsettings result of np.seterr(all='ignore')
"""
np.seterr(over=oldsettings['over'])
np.seterr(divide=oldsettings['divide'])
np.seterr(invalid=oldsettings['invalid'])
np.seterr(under=oldsettings['under'])
def quiet_divide(a, b):
"""
This function returns the division of a by b, without any warning in case b is 0.
"""
oldsettings = np.seterr(all='ignore')
to_return = a / b # this will not issue Warning messages
revert_seterr(oldsettings)
return to_return
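# Illustrative call (a sketch): quiet_divide(np.array([1., 2.]), np.array([0., 4.]))
# returns array([inf, 0.5]) without emitting a divide-by-zero RuntimeWarning.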
def quiet_log10(a):
"""
This function returns the log10 of a, without any warning in case a is 0 or negative.
"""
oldsettings = np.seterr(all='ignore')
to_return = np.log10(a) # this will not issue Warning messages
revert_seterr(oldsettings)
return to_return
def get_reduced(N_rand, a, value_method = 'original', error_method='std'):
"""
This function returns a tuple of value and error corresponding to an array of values
obtained from a MonteCarlo computation
It takes as argument an array that contains the original value,
followed by N_rand MonteCarlo values.
The relevant value is computed by returning the original value (default), the mean or the median,
depending on "value_method"
The errors are computed from the 68% quantiles or the standard deviation (default),
depending on "error_method"
"""
if error_method == 'quants' and not pn.config.INSTALLED['scipy']:
pn.log_.error('Scipy not installed, use_quants not available', calling = 'get_reduced')
else:
from scipy.stats.mstats import mquantiles
if value_method == 'original':
value = a[0]
elif value_method == 'mean':
value = a.mean()
elif value_method == 'median':
value = np.median(a)
if error_method == 'quants':
quants = mquantiles(a, [0.16, 0.84])
error = (quants[1]-quants[0])/2.
elif error_method == 'uquants':
quants = mquantiles(a, [0.16, 0.84])
error = quants[1] - value
elif error_method == 'lquants':
quants = mquantiles(a, [0.16, 0.84])
error = value - quants[0]
# error = (quants[1]-quants[0])/2)
elif error_method == 'upper':
mask = (a - value) >= 0
error = ((((a[mask] - value)**2).sum())/float(mask.sum()))**0.5
elif error_method == 'lower':
mask = (a - value) <= 0
error = -((((a[mask] - value)**2).sum())/float(mask.sum()))**0.5
elif error_method == 'std':
error = a.std()
else:
pn.log_.error('Unknown error method {}'.format(error_method))
return (value, error)
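# Illustrative use (a sketch; numbers are made up): for an array whose first element
# is the original value and the remaining N_rand elements are Monte Carlo draws,
# value, error = get_reduced(N_rand, a, value_method='median', error_method='quants')
# returns the median of a and half of its 16%-84% inter-quantile range as the error.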
def get_reduced_dic(N_rand, n_obs_ori, dic, value_method = 'original',
error_method='std', abund12 = False):
"""
This function returns a dictionary of values (ionic or atomic abundances, temp...) and errors.
It takes as argument a dictionary that is supposed to contains n_obs_ori values from
the original observations, followed (optionally) by N_rand*n_obs_ori MonteCarlo values.
The new values are computed by returning the original value (default), the mean or the median,
depending on "value_method"
The errors are computed from the 68% quantiles or the standard deviation (default), depending on "error_method"
It also transforms the abundances by number into 12+log10(abundances by number)
"""
res = {}
for key in dic.keys():
value = []
error = []
for i in np.arange(n_obs_ori):
if abund12:
values = 12 + np.log10(dic[key][n_obs_ori+i*N_rand:n_obs_ori+(i+1)*N_rand])
values = np.insert(values, 0, 12 + np.log10(dic[key][i]))
else:
values = dic[key][n_obs_ori+i*N_rand:n_obs_ori+(i+1)*N_rand]
values = np.insert(values, 0, dic[key][i])
tt = np.isfinite(values)
if tt.sum() == 0:
value.append(np.NAN)
error.append(0.0)
else:
v, e = get_reduced(N_rand, values[tt], value_method = value_method,
error_method=error_method)
value.append(v)
error.append(e)
res[key] = np.array(value)
res['{0}_e'.format(key)] = np.array(error)
return res
def addRand(N, list_, list_errors=None, lowlim=None):
"""
This function adds MonteCarlo random-gauss values to a list.
Parameters:
N: number of MonteCarlo values to be added
list_: list of input values
list_errors: list of errors associated with the values in list_. The errors are absolute; the
result will be a gaussian distribution of sigma=list_errors.
If None, the function adds N copies of each value to the list
lowlim: if not None (the default is None), any random-gauss number lower than lowlim is set to lowlim.
Usage:
print(addRand(3, [1,2,3])) # no errors: no random values, only replicates the values
[1, 2, 3, 1, 1, 1, 2, 2, 2, 3, 3, 3]
print(addRand(3, [1,20,300], [1, 1, 0.1]))
[1, 20, 300,
1.550094377016822, 1.868307356917796, 1.0242090163674675,
19.782857703031276, 19.049474190752157, 21.58680361755194,
299.99810384362934, 300.00753905080154, 299.94052054137694]
"""
# Check that the 2nd argument is a list
if type(list_) != type([]):
pn.log_.error('The second argument must be a list', calling = 'addRand')
new_list = list_[:]
# Computes the new values
if N != 0:
# initialize the new list with the values of list_
to_extend = []
for i in range(len(list_)):
if list_errors is None:
to_extend.extend([list_[i]] * N)
else:
to_extend.extend(np.random.standard_normal(N) * list_errors[i] + list_[i])
# filter the values lower than lowlim
if lowlim is not None:
to_extend = [value if value > lowlim else lowlim for value in to_extend]
new_list.extend(to_extend)
return new_list
"""
def solve_r(a, b):
if rpy_ok:
return base.solve(a, b)
else:
return None
def solve_lapack(a, b):
from numpy.linalg import lapack_lite
n_eq = a.shape[0]
n_rhs = b.shape[0]
pivots = np.zeros(n_eq, np.intc)
results = lapack_lite.dgesv(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0)
return results
"""
if cvxopt_ok:
def solve_cvxopt(a, b):
A = cvxopt.matrix(a)
B = cvxopt.matrix(b)
return cvxopt.lapack.gesv(A, B)
"""
# @profile
if cvxopt_ok:
@profile
def solve(a, b):
return solve_cvxopt(a, b)
else:
@profile
def solve(a, b):
return solve_np(a,b)
"""
#@profile
def solve(a, b):
return solve_np(a,b)
| Morisset/PyNeb_devel | pyneb/utils/misc.py | Python | gpl-3.0 | 15,313 | ["Gaussian"] | 08bf82a0d21963c19d7577c5b1dc4754b1f4fcf5a830bd2a0d9c8116339d67c5 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines tools to generate and analyze phase diagrams.
"""
import re
import collections
import itertools
import math
import logging
from monty.json import MSONable, MontyDecoder
from functools import lru_cache
import numpy as np
from scipy.spatial import ConvexHull
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element, DummySpecie, get_el_sp
from pymatgen.util.coord import Simplex, in_coord_list
from pymatgen.util.string import latexify
from pymatgen.util.plotting import pretty_plot
from pymatgen.analysis.reaction_calculator import Reaction, \
ReactionError
from pymatgen.entries import Entry
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "May 16, 2011"
logger = logging.getLogger(__name__)
class PDEntry(Entry):
"""
An object encompassing all relevant data for phase diagrams.
.. attribute:: composition
The composition associated with the PDEntry.
.. attribute:: energy
The energy associated with the entry.
.. attribute:: name
A name for the entry. This is the string shown in the phase diagrams.
By default, this is the reduced formula for the composition, but can be
set to some other string for display purposes.
.. attribute:: attribute
An arbitrary attribute. Can be used to specify that the entry is a newly
found compound, or to specify a particular label for the entry, etc.
An attribute can be anything but must be MSONable.
"""
def __init__(self, composition: Composition, energy: float,
name: str = None, attribute: object = None):
"""
Args:
composition (Composition): Composition
energy (float): Energy for composition.
name (str): Optional parameter to name the entry. Defaults
to the reduced chemical formula.
attribute: Optional attribute of the entry. Must be MSONable.
"""
super().__init__(composition, energy)
self.name = name if name else self.composition.reduced_formula
self.attribute = attribute
@property
def energy(self) -> float:
"""
:return: the energy of the entry.
"""
return self._energy
def __repr__(self):
return "PDEntry : {} with energy = {:.4f}".format(self.composition,
self.energy)
def as_dict(self):
"""
:return: MSONable dict.
"""
return_dict = super().as_dict()
return_dict.update({"name": self.name,
"attribute": self.attribute})
return return_dict
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.as_dict() == other.as_dict()
else:
return False
def __hash__(self):
return id(self)
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: PDEntry
"""
return cls(Composition(d["composition"]), d["energy"],
d["name"] if "name" in d else None,
d["attribute"] if "attribute" in d else None)
class GrandPotPDEntry(PDEntry):
"""
A grand potential pd entry object encompassing all relevant data for phase
diagrams. Chemical potentials are given as a element-chemical potential
dict.
"""
def __init__(self, entry, chempots, name=None):
"""
Args:
entry: A PDEntry-like object.
chempots: Chemical potential specification as {Element: float}.
name: Optional parameter to name the entry. Defaults to the reduced
chemical formula of the original entry.
"""
comp = entry.composition
self.original_entry = entry
self.original_comp = comp
grandpot = entry.energy - sum([comp[el] * pot
for el, pot in chempots.items()])
self.chempots = chempots
new_comp_map = {el: comp[el] for el in comp.elements
if el not in chempots}
super().__init__(new_comp_map, grandpot, entry.name)
self.name = name if name else entry.name
@property
def is_element(self):
"""
True if the entry is an element.
"""
return self.original_comp.is_element
def __repr__(self):
chempot_str = " ".join(["mu_%s = %.4f" % (el, mu)
for el, mu in self.chempots.items()])
return "GrandPotPDEntry with original composition " + \
"{}, energy = {:.4f}, {}".format(self.original_entry.composition,
self.original_entry.energy,
chempot_str)
def __str__(self):
return self.__repr__()
def as_dict(self):
"""
:return: MSONAble dict
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entry": self.original_entry.as_dict(),
"chempots": {el.symbol: u for el, u in self.chempots.items()},
"name": self.name}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: PDStructureEntry
"""
chempots = {Element(symbol): u for symbol, u in d["chempots"].items()}
entry = MontyDecoder().process_decoded(d["entry"])
return cls(entry, chempots, d["name"])
def __getattr__(self, a):
"""
Delegate attribute to original entry if available.
"""
if hasattr(self.original_entry, a):
return getattr(self.original_entry, a)
raise AttributeError(a)
class TransformedPDEntry(PDEntry):
"""
This class represents a TransformedPDEntry, which allows for a PDEntry to be
transformed to a different composition coordinate space. It is used in the
construction of phase diagrams that do not have elements as the terminal
compositions.
"""
def __init__(self, comp, original_entry):
"""
Args:
comp (Composition): Transformed composition as a Composition.
original_entry (PDEntry): Original entry that this entry arose from.
"""
super().__init__(comp, original_entry.energy)
self.original_entry = original_entry
self.name = original_entry.name
def __getattr__(self, a):
"""
Delegate attribute to original entry if available.
"""
if hasattr(self.original_entry, a):
return getattr(self.original_entry, a)
raise AttributeError(a)
def __repr__(self):
output = ["TransformedPDEntry {}".format(self.composition),
" with original composition {}".format(self.original_entry.composition),
", E = {:.4f}".format(self.original_entry.energy)]
return "".join(output)
def __str__(self):
return self.__repr__()
def as_dict(self):
"""
:return: MSONable dict
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entry": self.original_entry.as_dict(),
"composition": self.composition}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: TransformedPDEntry
"""
entry = MontyDecoder().process_decoded(d["entry"])
return cls(d["composition"], entry)
class PhaseDiagram(MSONable):
"""
Simple phase diagram class taking in elements and entries as inputs.
The algorithm is based on the work in the following papers:
1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
doi:10.1021/cm702327g
2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
doi:10.1016/j.elecom.2010.01.010
.. attribute: elements:
Elements in the phase diagram.
.. attribute: all_entries
All entries provided for Phase Diagram construction. Note that this
does not mean that all these entries are actually used in the phase
diagram. For example, this includes the positive formation energy
entries that are filtered out before Phase Diagram construction.
.. attribute: qhull_data
Data used in the convex hull operation. This is essentially a matrix of
composition data and energy per atom values created from qhull_entries.
.. attribute: qhull_entries:
Actual entries used in convex hull. Excludes all positive formation
energy entries.
.. attribute: dim
The dimensionality of the phase diagram.
.. attribute: facets
Facets of the phase diagram in the form of [[1,2,3],[4,5,6]...].
For a ternary, it is the indices (references to qhull_entries and
qhull_data) for the vertices of the phase triangles. Similarly
extended to higher D simplices for higher dimensions.
.. attribute: el_refs:
List of elemental references for the phase diagrams. These are
entries corresponding to the lowest energy element entries for simple
compositional phase diagrams.
.. attribute: simplices:
The simplices of the phase diagram as a list of np.ndarray, i.e.,
the list of stable compositional coordinates in the phase diagram.
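Example (an illustrative sketch; the energies below are made-up values, not
real formation energies)::
entries = [PDEntry(Composition("Li"), 0.0), PDEntry(Composition("O"), 0.0),
PDEntry(Composition("Li2O"), -6.0)]
pd = PhaseDiagram(entries)
stable_names = sorted(entry.name for entry in pd.stable_entries)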
"""
# Tolerance for determining if formation energy is positive.
formation_energy_tol = 1e-11
numerical_tol = 1e-8
def __init__(self, entries, elements=None):
"""
Standard constructor for phase diagram.
Args:
entries ([PDEntry]): A list of PDEntry-like objects having an
energy, energy_per_atom and composition.
elements ([Element]): Optional list of elements in the phase
diagram. If set to None, the elements are determined from
the entries themselves and are sorted alphabetically.
If specified, element ordering (e.g. for pd coordinates)
is preserved.
"""
if elements is None:
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
elements = sorted(list(elements))
elements = list(elements)
dim = len(elements)
entries = sorted(entries, key=lambda e: e.composition.reduced_composition)
el_refs = {}
min_entries = []
all_entries = []
for c, g in itertools.groupby(entries, key=lambda e: e.composition.reduced_composition):
g = list(g)
min_entry = min(g, key=lambda e: e.energy_per_atom)
if c.is_element:
el_refs[c.elements[0]] = min_entry
min_entries.append(min_entry)
all_entries.extend(g)
if len(el_refs) != dim:
raise PhaseDiagramError(
"There are no entries associated with a terminal element!.")
data = np.array([
[e.composition.get_atomic_fraction(el) for el in elements] + [
e.energy_per_atom]
for e in min_entries
])
# Use only entries with negative formation energy
vec = [el_refs[el].energy_per_atom for el in elements] + [-1]
form_e = -np.dot(data, vec)
inds = np.where(form_e < -self.formation_energy_tol)[0].tolist()
# Add the elemental references
inds.extend([min_entries.index(el) for el in el_refs.values()])
qhull_entries = [min_entries[i] for i in inds]
qhull_data = data[inds][:, 1:]
# Add an extra point to enforce full dimensionality.
# This point will be present in all upper hull facets.
extra_point = np.zeros(dim) + 1 / dim
extra_point[-1] = np.max(qhull_data) + 1
qhull_data = np.concatenate([qhull_data, [extra_point]], axis=0)
if dim == 1:
self.facets = [qhull_data.argmin(axis=0)]
else:
facets = get_facets(qhull_data)
finalfacets = []
for facet in facets:
# Skip facets that include the extra point
if max(facet) == len(qhull_data) - 1:
continue
m = qhull_data[facet]
m[:, -1] = 1
if abs(np.linalg.det(m)) > 1e-14:
finalfacets.append(facet)
self.facets = finalfacets
self.simplexes = [Simplex(qhull_data[f, :-1]) for f in self.facets]
self.all_entries = all_entries
self.qhull_data = qhull_data
self.dim = dim
self.el_refs = el_refs
self.elements = elements
self.qhull_entries = qhull_entries
self._stable_entries = set(self.qhull_entries[i] for i in
set(itertools.chain(*self.facets)))
def pd_coords(self, comp):
"""
The phase diagram is generated in a reduced dimensional space
(n_elements - 1). This function returns the coordinates in that space.
These coordinates are compatible with the stored simplex objects.
"""
if set(comp.elements).difference(self.elements):
raise ValueError('{} has elements not in the phase diagram {}'
''.format(comp, self.elements))
return np.array(
[comp.get_atomic_fraction(el) for el in self.elements[1:]])
@property
def all_entries_hulldata(self):
"""
:return: The actual ndarray used to construct the convex hull.
"""
data = []
for entry in self.all_entries:
comp = entry.composition
row = [comp.get_atomic_fraction(el) for el in self.elements]
row.append(entry.energy_per_atom)
data.append(row)
return np.array(data)[:, 1:]
@property
def unstable_entries(self):
"""
Entries that are unstable in the phase diagram. Includes positive
formation energy entries.
"""
return [e for e in self.all_entries if e not in self.stable_entries]
@property
def stable_entries(self):
"""
Returns the stable entries in the phase diagram.
"""
return self._stable_entries
def get_form_energy(self, entry):
"""
Returns the formation energy for an entry (NOT normalized) from the
elemental references.
Args:
entry: A PDEntry-like object.
Returns:
Formation energy from the elemental references.
"""
c = entry.composition
return entry.energy - sum([c[el] * self.el_refs[el].energy_per_atom
for el in c.elements])
def get_form_energy_per_atom(self, entry):
"""
Returns the formation energy per atom for an entry from the
elemental references.
Args:
entry: An PDEntry-like object
Returns:
Formation energy **per atom** from the elemental references.
"""
return self.get_form_energy(entry) / entry.composition.num_atoms
def __repr__(self):
return self.__str__()
def __str__(self):
symbols = [el.symbol for el in self.elements]
output = ["{} phase diagram".format("-".join(symbols)),
"{} stable phases: ".format(len(self.stable_entries)),
", ".join([entry.name
for entry in self.stable_entries])]
return "\n".join(output)
def as_dict(self):
"""
:return: MSONAble dict
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"all_entries": [e.as_dict() for e in self.all_entries],
"elements": [e.as_dict() for e in self.elements]}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: PhaseDiagram
"""
entries = [MontyDecoder().process_decoded(dd) for dd in d["all_entries"]]
elements = [Element.from_dict(dd) for dd in d["elements"]]
return cls(entries, elements)
@lru_cache(1)
def _get_facet_and_simplex(self, comp):
"""
Get any facet that a composition falls into. Cached so successive
calls at same composition are fast.
"""
c = self.pd_coords(comp)
for f, s in zip(self.facets, self.simplexes):
if s.in_simplex(c, PhaseDiagram.numerical_tol / 10):
return f, s
raise RuntimeError("No facet found for comp = {}".format(comp))
def _get_facet_chempots(self, facet):
"""
Calculates the chemical potentials for each element within a facet.
Args:
facet: Facet of the phase diagram.
Returns:
{ element: chempot } for all elements in the phase diagram.
"""
complist = [self.qhull_entries[i].composition for i in facet]
energylist = [self.qhull_entries[i].energy_per_atom for i in facet]
m = [[c.get_atomic_fraction(e) for e in self.elements] for c in
complist]
chempots = np.linalg.solve(m, energylist)
return dict(zip(self.elements, chempots))
def get_decomposition(self, comp):
"""
Provides the decomposition at a particular composition.
Args:
comp: A composition
Returns:
Decomposition as a dict of {Entry: amount}
"""
facet, simplex = self._get_facet_and_simplex(comp)
decomp_amts = simplex.bary_coords(self.pd_coords(comp))
return {self.qhull_entries[f]: amt
for f, amt in zip(facet, decomp_amts)
if abs(amt) > PhaseDiagram.numerical_tol}
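# Illustrative call (compositions assumed): in a Li-Fe-O phase diagram,
# pd.get_decomposition(Composition("LiFeO2")) returns {entry: amount} for the
# stable entries spanning that composition, e.g. a single {LiFeO2_entry: 1.0}
# if LiFeO2 itself lies on the hull.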
def get_hull_energy(self, comp):
"""
Args:
comp (Composition): Input composition
Returns:
Energy of lowest energy equilibrium at desired composition. Not
normalized by atoms, i.e. E(Li4O2) = 2 * E(Li2O)
"""
e = 0
for k, v in self.get_decomposition(comp).items():
e += k.energy_per_atom * v
return e * comp.num_atoms
def get_decomp_and_e_above_hull(self, entry, allow_negative=False):
"""
Provides the decomposition and energy above convex hull for an entry.
Due to caching, can be much faster if entries with the same composition
are processed together.
Args:
entry: A PDEntry like object
allow_negative: Whether to allow negative e_above_hulls. Used to
calculate equilibrium reaction energies. Defaults to False.
Returns:
(decomp, energy above convex hull) Stable entries should have
energy above hull of 0. The decomposition is provided as a dict of
{Entry: amount}.
"""
if entry in self.stable_entries:
return {entry: 1}, 0
comp = entry.composition
facet, simplex = self._get_facet_and_simplex(comp)
decomp_amts = simplex.bary_coords(self.pd_coords(comp))
decomp = {self.qhull_entries[f]: amt
for f, amt in zip(facet, decomp_amts)
if abs(amt) > PhaseDiagram.numerical_tol}
energies = [self.qhull_entries[i].energy_per_atom for i in facet]
ehull = entry.energy_per_atom - np.dot(decomp_amts, energies)
if allow_negative or ehull >= -PhaseDiagram.numerical_tol:
return decomp, ehull
raise ValueError("No valid decomp found!")
def get_e_above_hull(self, entry):
"""
Provides the energy above convex hull for an entry
Args:
entry: A PDEntry like object
Returns:
Energy above convex hull of entry. Stable entries should have
energy above hull of 0.
"""
return self.get_decomp_and_e_above_hull(entry)[1]
def get_equilibrium_reaction_energy(self, entry):
"""
Provides the reaction energy of a stable entry from the neighboring
equilibrium stable entries (also known as the inverse distance to
hull).
Args:
entry: A PDEntry like object
Returns:
Equilibrium reaction energy of entry. Stable entries should have
equilibrium reaction energy <= 0.
"""
if entry not in self.stable_entries:
raise ValueError("Equilibrium reaction energy is available only "
"for stable entries.")
if entry.is_element:
return 0
entries = [e for e in self.stable_entries if e != entry]
modpd = PhaseDiagram(entries, self.elements)
return modpd.get_decomp_and_e_above_hull(entry,
allow_negative=True)[1]
def get_composition_chempots(self, comp):
"""
Get the chemical potentials for all elements at a given composition.
:param comp: Composition
:return: Dict of chemical potentials.
"""
facet = self._get_facet_and_simplex(comp)[0]
return self._get_facet_chempots(facet)
def get_all_chempots(self, comp):
"""
Get chemical potentials at a given composition.
:param comp: Composition
:return: Chemical potentials.
"""
# note: this follows the same approach as _get_facet_and_simplex,
# but returns all facets that contain the composition rather than just the first match
c = self.pd_coords(comp)
allfacets = []
for f, s in zip(self.facets, self.simplexes):
if s.in_simplex(c, PhaseDiagram.numerical_tol / 10):
allfacets.append(f)
if not len(allfacets):
raise RuntimeError("No facets found for comp = {}".format(comp))
else:
chempots = {}
for facet in allfacets:
facet_elt_list = [self.qhull_entries[j].name for j in facet]
facet_name = '-'.join(facet_elt_list)
chempots[facet_name] = self._get_facet_chempots(facet)
return chempots
def get_transition_chempots(self, element):
"""
Get the critical chemical potentials for an element in the Phase
Diagram.
Args:
element: An element. Has to be in the PD in the first place.
Returns:
A sorted sequence of critical chemical potentials, from less
negative to more negative.
"""
if element not in self.elements:
raise ValueError("get_transition_chempots can only be called with "
"elements in the phase diagram.")
critical_chempots = []
for facet in self.facets:
chempots = self._get_facet_chempots(facet)
critical_chempots.append(chempots[element])
clean_pots = []
for c in sorted(critical_chempots):
if len(clean_pots) == 0:
clean_pots.append(c)
else:
if abs(c - clean_pots[-1]) > PhaseDiagram.numerical_tol:
clean_pots.append(c)
clean_pots.reverse()
return tuple(clean_pots)
def get_critical_compositions(self, comp1, comp2):
"""
Get the critical compositions along the tieline between two
compositions. I.e. where the decomposition products change.
The endpoints are also returned.
Args:
comp1, comp2 (Composition): compositions that define the tieline
Returns:
[(Composition)]: list of critical compositions. All are of
the form x * comp1 + (1-x) * comp2
"""
n1 = comp1.num_atoms
n2 = comp2.num_atoms
pd_els = self.elements
# the reduced dimensionality Simplexes don't use the
# first element in the PD
c1 = self.pd_coords(comp1)
c2 = self.pd_coords(comp2)
# none of the projections work if c1 == c2, so just return *copies*
# of the inputs
if np.all(c1 == c2):
return [comp1.copy(), comp2.copy()]
intersections = [c1, c2]
for sc in self.simplexes:
intersections.extend(sc.line_intersection(c1, c2))
intersections = np.array(intersections)
# find position along line
l = (c2 - c1)
l /= np.sum(l ** 2) ** 0.5
proj = np.dot(intersections - c1, l)
# only take compositions between endpoints
proj = proj[np.logical_and(proj > -self.numerical_tol,
proj < proj[1] + self.numerical_tol)]
proj.sort()
# only unique compositions
valid = np.ones(len(proj), dtype=bool)
valid[1:] = proj[1:] > proj[:-1] + self.numerical_tol
proj = proj[valid]
ints = c1 + l * proj[:, None]
# reconstruct full-dimensional composition array
cs = np.concatenate([np.array([1 - np.sum(ints, axis=-1)]).T,
ints], axis=-1)
# mixing fraction when compositions are normalized
x = proj / np.dot(c2 - c1, l)
# mixing fraction when compositions are not normalized
x_unnormalized = x * n1 / (n2 + x * (n1 - n2))
num_atoms = n1 + (n2 - n1) * x_unnormalized
cs *= num_atoms[:, None]
return [Composition((c, v) for c, v in zip(pd_els, m)) for m in cs]
def get_element_profile(self, element, comp, comp_tol=1e-5):
"""
Provides the element evolution data for a composition.
For example, can be used to analyze Li conversion voltages by varying
uLi and looking at the phases formed. Also can be used to analyze O2
evolution by varying uO2.
Args:
element: An element. Must be in the phase diagram.
comp: A Composition
comp_tol: The tolerance to use when calculating decompositions.
Phases with amounts less than this tolerance are excluded.
Defaults to 1e-5.
Returns:
Evolution data as a list of dictionaries of the following format:
[ {'chempot': -10.487582010000001, 'evolution': -2.0,
'reaction': Reaction Object}, ...]
"""
element = get_el_sp(element)
if element not in self.elements:
raise ValueError("get_transition_chempots can only be called with"
" elements in the phase diagram.")
gccomp = Composition({el: amt for el, amt in comp.items()
if el != element})
elref = self.el_refs[element]
elcomp = Composition(element.symbol)
evolution = []
for cc in self.get_critical_compositions(elcomp, gccomp)[1:]:
decomp_entries = self.get_decomposition(cc).keys()
decomp = [k.composition for k in decomp_entries]
rxn = Reaction([comp], decomp + [elcomp])
rxn.normalize_to(comp)
c = self.get_composition_chempots(cc + elcomp * 1e-5)[element]
amt = -rxn.coeffs[rxn.all_comp.index(elcomp)]
evolution.append({'chempot': c,
'evolution': amt,
'element_reference': elref,
'reaction': rxn, 'entries': decomp_entries})
return evolution
def get_chempot_range_map(self, elements, referenced=True, joggle=True):
"""
Returns a chemical potential range map for each stable entry.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges
of all Li-Co-O phases with respect to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: If True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
joggle (boolean): Whether to joggle the input to avoid precision
errors.
Returns:
Returns a dict of the form {entry: [simplices]}. The list of
simplices are the sides of the N-1 dim polytope bounding the
allowable chemical potential range of each entry.
"""
all_chempots = []
pd = self
facets = pd.facets
for facet in facets:
chempots = self._get_facet_chempots(facet)
all_chempots.append([chempots[el] for el in pd.elements])
inds = [pd.elements.index(el) for el in elements]
el_energies = {el: 0.0 for el in elements}
if referenced:
el_energies = {el: pd.el_refs[el].energy_per_atom
for el in elements}
chempot_ranges = collections.defaultdict(list)
vertices = [list(range(len(self.elements)))]
if len(all_chempots) > len(self.elements):
vertices = get_facets(all_chempots, joggle=joggle)
for ufacet in vertices:
for combi in itertools.combinations(ufacet, 2):
data1 = facets[combi[0]]
data2 = facets[combi[1]]
common_ent_ind = set(data1).intersection(set(data2))
if len(common_ent_ind) == len(elements):
common_entries = [pd.qhull_entries[i]
for i in common_ent_ind]
data = np.array([[all_chempots[i][j]
- el_energies[pd.elements[j]]
for j in inds] for i in combi])
sim = Simplex(data)
for entry in common_entries:
chempot_ranges[entry].append(sim)
return chempot_ranges
def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=1e-2):
"""
returns a set of chemical potentials corresponding to the vertices of
the simplex in the chemical potential phase diagram.
The simplex is built using all elements in the target_composition
except dep_elt.
The chemical potential of dep_elt is computed from the target
composition energy.
This method is useful to get the limiting conditions for
defects computations for instance.
Args:
target_comp: A Composition object
dep_elt: the element for which the chemical potential is computed
from the energy of
the stable phase at the target composition
tol_en: energy tolerance used to decide whether two vertex conditions are identical
Returns:
[{Element:mu}]: An array of conditions on simplex vertices for
which each element has a chemical potential set to a given
value. "absolute" values (i.e., not referenced to element energies)
"""
muref = np.array([self.el_refs[e].energy_per_atom
for e in self.elements if e != dep_elt])
chempot_ranges = self.get_chempot_range_map(
[e for e in self.elements if e != dep_elt])
for e in self.elements:
if e not in target_comp.elements:
target_comp = target_comp + Composition({e: 0.0})
coeff = [-target_comp[e] for e in self.elements if e != dep_elt]
for e in chempot_ranges.keys():
if e.composition.reduced_composition == \
target_comp.reduced_composition:
multiplicator = e.composition[dep_elt] / target_comp[dep_elt]
ef = e.energy / multiplicator
all_coords = []
for s in chempot_ranges[e]:
for v in s._coords:
elts = [e for e in self.elements if e != dep_elt]
res = {}
for i in range(len(elts)):
res[elts[i]] = v[i] + muref[i]
res[dep_elt] = (np.dot(v + muref, coeff) + ef) / target_comp[dep_elt]
already_in = False
for di in all_coords:
dict_equals = True
for k in di:
if abs(di[k] - res[k]) > tol_en:
dict_equals = False
break
if dict_equals:
already_in = True
break
if not already_in:
all_coords.append(res)
return all_coords
def get_chempot_range_stability_phase(self, target_comp, open_elt):
"""
returns a set of chemical potentials corresponding to the max and min
chemical potential of the open element for a given composition. It is
quite common to have for instance a ternary oxide (e.g., ABO3) for
which you want to know what are the A and B chemical potential leading
to the highest and lowest oxygen chemical potential (reducing and
oxidizing conditions). This is useful for defect computations.
Args:
target_comp: A Composition object
open_elt: Element that you want to constrain to be max or min
Returns:
{Element:(mu_min,mu_max)}: Chemical potentials are given in
"absolute" values (i.e., not referenced to 0)
"""
muref = np.array([self.el_refs[e].energy_per_atom
for e in self.elements if e != open_elt])
chempot_ranges = self.get_chempot_range_map(
[e for e in self.elements if e != open_elt])
for e in self.elements:
if e not in target_comp.elements:
target_comp = target_comp + Composition({e: 0.0})
coeff = [-target_comp[e] for e in self.elements if e != open_elt]
max_open = -float('inf')
min_open = float('inf')
max_mus = None
min_mus = None
for e in chempot_ranges.keys():
if e.composition.reduced_composition == \
target_comp.reduced_composition:
multiplicator = e.composition[open_elt] / target_comp[open_elt]
ef = e.energy / multiplicator
all_coords = []
for s in chempot_ranges[e]:
for v in s._coords:
all_coords.append(v)
if (np.dot(v + muref, coeff) + ef) / target_comp[open_elt] > max_open:
max_open = (np.dot(v + muref, coeff) + ef) / target_comp[open_elt]
max_mus = v
if (np.dot(v + muref, coeff) + ef) / target_comp[open_elt] < min_open:
min_open = (np.dot(v + muref, coeff) + ef) / target_comp[open_elt]
min_mus = v
elts = [e for e in self.elements if e != open_elt]
res = {}
for i in range(len(elts)):
res[elts[i]] = (min_mus[i] + muref[i], max_mus[i] + muref[i])
res[open_elt] = (min_open, max_open)
return res
class GrandPotentialPhaseDiagram(PhaseDiagram):
"""
A class representing a Grand potential phase diagram. Grand potential phase
diagrams are essentially phase diagrams that are open to one or more
components. To construct such phase diagrams, the relevant free energy is
the grand potential, which can be written as the Legendre transform of the
Gibbs free energy as follows
Grand potential = G - u_X N_X
The algorithm is based on the work in the following papers:
1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
doi:10.1021/cm702327g
2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
doi:10.1016/j.elecom.2010.01.010
"""
def __init__(self, entries, chempots, elements=None):
"""
Standard constructor for grand potential phase diagram.
Args:
entries ([PDEntry]): A list of PDEntry-like objects having an
energy, energy_per_atom and composition.
chempots {Element: float}: Specify the chemical potentials
of the open elements.
elements ([Element]): Optional list of elements in the phase
diagram. If set to None, the elements are determined from
the entries themselves.
"""
if elements is None:
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
self.chempots = {get_el_sp(el): u for el, u in chempots.items()}
elements = set(elements).difference(self.chempots.keys())
all_entries = []
for e in entries:
if len(set(e.composition.elements).intersection(set(elements))) > 0:
all_entries.append(GrandPotPDEntry(e, self.chempots))
super().__init__(all_entries, elements)
def __str__(self):
output = []
chemsys = "-".join([el.symbol for el in self.elements])
output.append("{} grand potential phase diagram with ".format(chemsys))
output[-1] += ", ".join(["u{}={}".format(el, v)
for el, v in self.chempots.items()])
output.append("{} stable phases: ".format(len(self.stable_entries)))
output.append(", ".join([entry.name
for entry in self.stable_entries]))
return "\n".join(output)
def as_dict(self):
"""
:return: MSONable dict
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"all_entries": [e.as_dict() for e in self.all_entries],
"chempots": self.chempots,
"elements": [e.as_dict() for e in self.elements]}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: GrandPotentialPhaseDiagram
"""
entries = MontyDecoder().process_decoded(d["all_entries"])
elements = MontyDecoder().process_decoded(d["elements"])
return cls(entries, d["chempots"], elements)
class CompoundPhaseDiagram(PhaseDiagram):
"""
Generates phase diagrams from compounds as terminations instead of
elements.
"""
# Tolerance for determining if amount of a composition is positive.
amount_tol = 1e-5
def __init__(self, entries, terminal_compositions,
normalize_terminal_compositions=True):
"""
Initializes a CompoundPhaseDiagram.
Args:
entries ([PDEntry]): Sequence of input entries. For example,
if you want a Li2O-P2O5 phase diagram, you might have all
Li-P-O entries as an input.
terminal_compositions ([Composition]): Terminal compositions of
phase space. In the Li2O-P2O5 example, these will be the
Li2O and P2O5 compositions.
normalize_terminal_compositions (bool): Whether to normalize the
terminal compositions to a per atom basis. If normalized,
the energy above hulls will be consistent
for comparison across systems. Non-normalized terminals are
more intuitive in terms of compositional breakdowns.
"""
self.original_entries = entries
self.terminal_compositions = terminal_compositions
self.normalize_terminals = normalize_terminal_compositions
(pentries, species_mapping) = \
self.transform_entries(entries, terminal_compositions)
self.species_mapping = species_mapping
super().__init__(
pentries, elements=species_mapping.values())
def transform_entries(self, entries, terminal_compositions):
"""
Method to transform all entries to the composition coordinate in the
terminal compositions. If the entry does not fall within the space
defined by the terminal compositions, they are excluded. For example,
Li3PO4 is mapped into a Li2O:1.5, P2O5:0.5 composition. The terminal
compositions are represented by DummySpecies.
Args:
entries: Sequence of all input entries
terminal_compositions: Terminal compositions of phase space.
Returns:
Sequence of TransformedPDEntries falling within the phase space.
"""
new_entries = []
if self.normalize_terminals:
fractional_comp = [c.fractional_composition
for c in terminal_compositions]
else:
fractional_comp = terminal_compositions
# Map terminal compositions to unique dummy species.
sp_mapping = collections.OrderedDict()
for i, comp in enumerate(fractional_comp):
sp_mapping[comp] = DummySpecie("X" + chr(102 + i))
for entry in entries:
try:
rxn = Reaction(fractional_comp, [entry.composition])
rxn.normalize_to(entry.composition)
# We only allow reactions that have positive amounts of
# reactants.
if all([rxn.get_coeff(comp) <= CompoundPhaseDiagram.amount_tol
for comp in fractional_comp]):
newcomp = {sp_mapping[comp]: -rxn.get_coeff(comp)
for comp in fractional_comp}
newcomp = {k: v for k, v in newcomp.items()
if v > CompoundPhaseDiagram.amount_tol}
transformed_entry = \
TransformedPDEntry(Composition(newcomp), entry)
new_entries.append(transformed_entry)
except ReactionError:
# If the reaction can't be balanced, the entry does not fall
# into the phase space. We ignore them.
pass
return new_entries, sp_mapping
def as_dict(self):
"""
:return: MSONable dict
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"original_entries": [e.as_dict() for e in self.original_entries],
"terminal_compositions": [c.as_dict()
for c in self.terminal_compositions],
"normalize_terminal_compositions":
self.normalize_terminals}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict Representation
:return: CompoundPhaseDiagram
"""
dec = MontyDecoder()
entries = dec.process_decoded(d["original_entries"])
terminal_compositions = dec.process_decoded(d["terminal_compositions"])
return cls(entries, terminal_compositions,
d["normalize_terminal_compositions"])
class ReactionDiagram:
"""
Analyzes the possible reactions between a pair of compounds, e.g.,
an electrolyte and an electrode.
"""
def __init__(self, entry1, entry2, all_entries, tol=1e-4,
float_fmt="%.4f"):
"""
Args:
entry1 (ComputedEntry): Entry for 1st component. Note that
corrections, if any, must already be pre-applied. This is to
give flexibility for different kinds of corrections, e.g.,
if a particular entry is fitted to an experimental data (such
as EC molecule).
entry2 (ComputedEntry): Entry for 2nd component. Note that
corrections must already be pre-applied. This is to
give flexibility for different kinds of corrections, e.g.,
if a particular entry is fitted to an experimental data (such
as EC molecule).
all_entries ([ComputedEntry]): All other entries to be
considered in the analysis. Note that corrections, if any,
must already be pre-applied.
tol (float): Tolerance to be used to determine validity of reaction.
float_fmt (str): Formatting string to be applied to all floats.
Determines number of decimal places in reaction string.
"""
elements = set()
for e in [entry1, entry2]:
elements.update([el.symbol for el in e.composition.elements])
elements = tuple(elements) # Fix elements to ensure order.
comp_vec1 = np.array([entry1.composition.get_atomic_fraction(el)
for el in elements])
comp_vec2 = np.array([entry2.composition.get_atomic_fraction(el)
for el in elements])
r1 = entry1.composition.reduced_composition
r2 = entry2.composition.reduced_composition
logger.debug("%d total entries." % len(all_entries))
pd = PhaseDiagram(all_entries + [entry1, entry2])
terminal_formulas = [entry1.composition.reduced_formula,
entry2.composition.reduced_formula]
logger.debug("%d stable entries" % len(pd.stable_entries))
logger.debug("%d facets" % len(pd.facets))
logger.debug("%d qhull_entries" % len(pd.qhull_entries))
rxn_entries = []
done = []
def fmt(fl):
return float_fmt % fl
for facet in pd.facets:
for face in itertools.combinations(facet, len(facet) - 1):
face_entries = [pd.qhull_entries[i] for i in face]
if any([e.composition.reduced_formula in terminal_formulas
for e in face_entries]):
continue
try:
m = []
for e in face_entries:
m.append([e.composition.get_atomic_fraction(el)
for el in elements])
m.append(comp_vec2 - comp_vec1)
m = np.array(m).T
coeffs = np.linalg.solve(m, comp_vec2)
x = coeffs[-1]
if all([c >= -tol for c in coeffs]) and \
(abs(sum(coeffs[:-1]) - 1) < tol) and \
(tol < x < 1 - tol):
c1 = x / r1.num_atoms
c2 = (1 - x) / r2.num_atoms
factor = 1 / (c1 + c2)
c1 *= factor
c2 *= factor
# Avoid duplicate reactions.
if any([np.allclose([c1, c2], cc) for cc in done]):
continue
done.append((c1, c2))
rxn_str = "%s %s + %s %s -> " % (
fmt(c1), r1.reduced_formula,
fmt(c2), r2.reduced_formula)
products = []
product_entries = []
energy = - (x * entry1.energy_per_atom +
(1 - x) * entry2.energy_per_atom)
for c, e in zip(coeffs[:-1], face_entries):
if c > tol:
r = e.composition.reduced_composition
products.append("%s %s" % (
fmt(c / r.num_atoms * factor),
r.reduced_formula))
product_entries.append((c, e))
energy += c * e.energy_per_atom
rxn_str += " + ".join(products)
comp = x * comp_vec1 + (1 - x) * comp_vec2
entry = PDEntry(
Composition(dict(zip(elements, comp))),
energy=energy, attribute=rxn_str)
entry.decomposition = product_entries
rxn_entries.append(entry)
except np.linalg.LinAlgError:
logger.debug("Reactants = %s" % (", ".join([
entry1.composition.reduced_formula,
entry2.composition.reduced_formula])))
logger.debug("Products = %s" % (
", ".join([e.composition.reduced_formula
for e in face_entries])))
rxn_entries = sorted(rxn_entries, key=lambda e: e.name, reverse=True)
self.entry1 = entry1
self.entry2 = entry2
self.rxn_entries = rxn_entries
self.labels = collections.OrderedDict()
for i, e in enumerate(rxn_entries):
self.labels[str(i + 1)] = e.attribute
e.name = str(i + 1)
self.all_entries = all_entries
self.pd = pd
def get_compound_pd(self):
"""
Get the CompoundPhaseDiagram object, which can then be used for
plotting.
Returns:
(CompoundPhaseDiagram)
"""
# For this plot, since the reactions are reported in formation
# energies, we need to set the energies of the terminal compositions
# to 0. So we create copies with 0 energy.
entry1 = PDEntry(self.entry1.composition, 0)
entry2 = PDEntry(self.entry2.composition, 0)
cpd = CompoundPhaseDiagram(
self.rxn_entries + [entry1, entry2],
[Composition(entry1.composition.reduced_formula),
Composition(entry2.composition.reduced_formula)],
normalize_terminal_compositions=False)
return cpd
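# A minimal usage sketch, assuming `rd` is an instance of the reaction-diagram
# class this method belongs to, built from entry1, entry2 and all_entries:
#
#     cpd = rd.get_compound_pd()
#     plotter = PDPlotter(cpd)
#     plotter.show()  # reaction energies plotted against mixing fraction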
class PhaseDiagramError(Exception):
"""
An exception class for Phase Diagram generation.
"""
pass
def get_facets(qhull_data, joggle=False):
"""
Get the simplex facets for the Convex hull.
Args:
qhull_data (np.ndarray): The data from which to construct the convex
hull as a Nxd array (N being number of data points and d being the
dimension)
joggle (boolean): Whether to joggle the input to avoid precision
errors.
Returns:
List of simplices of the Convex Hull.
"""
if joggle:
return ConvexHull(qhull_data, qhull_options="QJ i").simplices
else:
return ConvexHull(qhull_data, qhull_options="Qt i").simplices
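# A small illustration of get_facets for a 2-component hull, where each row of
# qhull_data is (composition fraction, formation energy):
#
#     import numpy as np
#     qhull_data = np.array([[0.0, 0.0], [1.0, 0.0], [0.5, -1.2], [0.25, -0.4]])
#     simplices = get_facets(qhull_data)               # pairs of point indices
#     simplices = get_facets(qhull_data, joggle=True)  # joggle to dodge precision errors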
class PDPlotter:
"""
A plotter class for phase diagrams.
"""
def __init__(self, phasediagram, show_unstable=0, **plotkwargs):
r"""
Args:
phasediagram: PhaseDiagram object.
show_unstable (float): Whether unstable (above-hull) phases will also
be plotted, as red crosses. If a number > 0 is entered, all phases
with ehull < show_unstable will be shown.
**plotkwargs: Keyword args passed to matplotlib.pyplot.plot. Can
be used to customize markers etc. If not set, the default is
{
"markerfacecolor": (0.2157, 0.4941, 0.7216),
"markersize": 10,
"linewidth": 3
}
"""
# note: palettable imports matplotlib
from palettable.colorbrewer.qualitative import Set1_3
self._pd = phasediagram
self._dim = len(self._pd.elements)
if self._dim > 4:
raise ValueError("Only 1-4 components supported!")
self.lines = uniquelines(self._pd.facets) if self._dim > 1 else \
[[self._pd.facets[0][0], self._pd.facets[0][0]]]
self.show_unstable = show_unstable
colors = Set1_3.mpl_colors
self.plotkwargs = plotkwargs or {
"markerfacecolor": colors[2],
"markersize": 10,
"linewidth": 3
}
@property
def pd_plot_data(self):
"""
Plot data for phase diagram.
2-comp - Full hull with energies
3/4-comp - Projection into 2D or 3D Gibbs triangle.
Returns:
(lines, stable_entries, unstable_entries):
- lines is a list of list of coordinates for lines in the PD.
- stable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- unstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
"""
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
lines = []
stable_entries = {}
for line in self.lines:
entry1 = entries[line[0]]
entry2 = entries[line[1]]
if self._dim < 3:
x = [data[line[0]][0], data[line[1]][0]]
y = [pd.get_form_energy_per_atom(entry1),
pd.get_form_energy_per_atom(entry2)]
coord = [x, y]
elif self._dim == 3:
coord = triangular_coord(data[line, 0:2])
else:
coord = tet_coord(data[line, 0:3])
lines.append(coord)
labelcoord = list(zip(*coord))
stable_entries[labelcoord[0]] = entry1
stable_entries[labelcoord[1]] = entry2
all_entries = pd.all_entries
all_data = np.array(pd.all_entries_hulldata)
unstable_entries = dict()
stable = pd.stable_entries
for i in range(0, len(all_entries)):
entry = all_entries[i]
if entry not in stable:
if self._dim < 3:
x = [all_data[i][0], all_data[i][0]]
y = [pd.get_form_energy_per_atom(entry),
pd.get_form_energy_per_atom(entry)]
coord = [x, y]
elif self._dim == 3:
coord = triangular_coord([all_data[i, 0:2],
all_data[i, 0:2]])
else:
coord = tet_coord([all_data[i, 0:3], all_data[i, 0:3],
all_data[i, 0:3]])
labelcoord = list(zip(*coord))
unstable_entries[entry] = labelcoord[0]
return lines, stable_entries, unstable_entries
def get_plot(self, label_stable=True, label_unstable=True, ordering=None,
energy_colormap=None, process_attributes=False, plt=None):
"""
:param label_stable: Whether to label stable compounds.
:param label_unstable: Whether to label unstable compounds.
:param ordering: Ordering of vertices.
:param energy_colormap: Colormap for coloring energy.
:param process_attributes: Whether to process the attributes.
:param plt: Existing plt object if plotting multiple phase diagrams.
:return: matplotlib.pyplot.
"""
if self._dim < 4:
plt = self._get_2d_plot(label_stable, label_unstable, ordering,
energy_colormap, plt=plt,
process_attributes=process_attributes)
elif self._dim == 4:
plt = self._get_3d_plot(label_stable)
return plt
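# A minimal PDPlotter usage sketch, assuming `entries` is a list of
# PDEntry/ComputedEntry objects for one chemical system:
#
#     pd = PhaseDiagram(entries)
#     plotter = PDPlotter(pd, show_unstable=0.05)  # phases < 50 meV/atom above hull
#     plt = plotter.get_plot(label_unstable=False)
#     plt.savefig("phase_diagram.png")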
def plot_element_profile(self, element, comp, show_label_index=None,
xlim=5):
"""
Draw the element profile plot for a composition while varying the
chemical potential of an element.
The x value is the negative of the chemical potential referenced to the
elemental chemical potential. For example, if Element("Li") is chosen,
x = -(µLi - µLi0), which corresponds to the voltage versus a metal anode.
The y values represent the number of atoms of the element taken up by this
composition (unit: per atom). All reactions are printed to help choose the
profile steps you want to label in the plot.
Args:
element (Element): An element of which the chemical potential is
considered. It also must be in the phase diagram.
comp (Composition): A composition.
show_label_index (list of integers): The indices of the profile steps
whose reaction products should be labelled in the plot. Defaults to
None (no annotations for reaction products). Profile steps are
counted from zero. For example, show_label_index=[0, 2, 5] labels
profile steps 0, 2 and 5.
xlim (float): The max x value. x value is from 0 to xlim. Default to
5 eV.
Returns:
Plot of element profile evolution by varying the chemical potential
of an element.
"""
plt = pretty_plot(12, 8)
pd = self._pd
evolution = pd.get_element_profile(element, comp)
num_atoms = evolution[0]["reaction"].reactants[0].num_atoms
element_energy = evolution[0]['chempot']
x1, x2, y1 = None, None, None
for i, d in enumerate(evolution):
v = -(d["chempot"] - element_energy)
if i != 0:
plt.plot([x2, x2], [y1, d["evolution"] / num_atoms],
'k', linewidth=2.5)
x1 = v
y1 = d["evolution"] / num_atoms
if i != len(evolution) - 1:
x2 = - (evolution[i + 1]["chempot"] - element_energy)
else:
x2 = 5.0
if show_label_index is not None and i in show_label_index:
products = [re.sub(r"(\d+)", r"$_{\1}$", p.reduced_formula)
for p in d["reaction"].products
if p.reduced_formula != element.symbol]
plt.annotate(", ".join(products), xy=(v + 0.05, y1 + 0.05),
fontsize=24, color='r')
plt.plot([x1, x2], [y1, y1], 'r', linewidth=3)
else:
plt.plot([x1, x2], [y1, y1], 'k', linewidth=2.5)
plt.xlim((0, xlim))
plt.xlabel("-$\\Delta{\\mu}$ (eV)")
plt.ylabel("Uptake per atom")
return plt
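# A usage sketch for plot_element_profile; Element and Composition come from
# pymatgen.core, and the Li-Fe-P-O system is only an illustrative choice:
#
#     plotter = PDPlotter(PhaseDiagram(entries))
#     plt = plotter.plot_element_profile(Element("Li"), Composition("LiFePO4"),
#                                        show_label_index=[0, 2], xlim=5)
#     plt.show()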
def show(self, *args, **kwargs):
r"""
Draws the phase diagram using Matplotlib and show it.
Args:
*args: Passed to get_plot.
**kwargs: Passed to get_plot.
"""
self.get_plot(*args, **kwargs).show()
def _get_2d_plot(self, label_stable=True, label_unstable=True,
ordering=None, energy_colormap=None, vmin_mev=-60.0,
vmax_mev=60.0, show_colorbar=True,
process_attributes=False, plt=None):
"""
Shows the plot using pylab. Usually I won't do imports in methods,
but since matplotlib is a fairly expensive library to load and not all
machines have it installed, I have done it this way.
"""
if plt is None:
plt = pretty_plot(8, 6)
from matplotlib.font_manager import FontProperties
if ordering is None:
(lines, labels, unstable) = self.pd_plot_data
else:
(_lines, _labels, _unstable) = self.pd_plot_data
(lines, labels, unstable) = order_phase_diagram(
_lines, _labels, _unstable, ordering)
if energy_colormap is None:
if process_attributes:
for x, y in lines:
plt.plot(x, y, "k-", linewidth=3, markeredgecolor="k")
# One should think about a clever way to have "complex"
# attributes with complex processing options but with a clear
# logic. At this moment, I just use the attributes to know
# whether an entry is a new compound or an existing (from the
# ICSD or from the MP) one.
for x, y in labels.keys():
if labels[(x, y)].attribute is None or \
labels[(x, y)].attribute == "existing":
plt.plot(x, y, "ko", **self.plotkwargs)
else:
plt.plot(x, y, "k*", **self.plotkwargs)
else:
for x, y in lines:
plt.plot(x, y, "ko-", **self.plotkwargs)
else:
from matplotlib.colors import Normalize, LinearSegmentedColormap
from matplotlib.cm import ScalarMappable
for x, y in lines:
plt.plot(x, y, "k-", markeredgecolor="k")
vmin = vmin_mev / 1000.0
vmax = vmax_mev / 1000.0
if energy_colormap == 'default':
mid = - vmin / (vmax - vmin)
cmap = LinearSegmentedColormap.from_list(
'my_colormap', [(0.0, '#005500'), (mid, '#55FF55'),
(mid, '#FFAAAA'), (1.0, '#FF0000')])
else:
cmap = energy_colormap
norm = Normalize(vmin=vmin, vmax=vmax)
_map = ScalarMappable(norm=norm, cmap=cmap)
_energies = [self._pd.get_equilibrium_reaction_energy(entry)
for coord, entry in labels.items()]
energies = [en if en < 0.0 else -0.00000001 for en in _energies]
vals_stable = _map.to_rgba(energies)
ii = 0
if process_attributes:
for x, y in labels.keys():
if labels[(x, y)].attribute is None or \
labels[(x, y)].attribute == "existing":
plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
markersize=12)
else:
plt.plot(x, y, "*", markerfacecolor=vals_stable[ii],
markersize=18)
ii += 1
else:
for x, y in labels.keys():
plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
markersize=15)
ii += 1
font = FontProperties()
font.set_weight("bold")
font.set_size(24)
# Sets a nice layout depending on the type of PD. Also defines a
# "center" for the PD, which then allows the annotations to be spread
# out in a nice manner.
if len(self._pd.elements) == 3:
plt.axis("equal")
plt.xlim((-0.1, 1.2))
plt.ylim((-0.1, 1.0))
plt.axis("off")
center = (0.5, math.sqrt(3) / 6)
else:
all_coords = labels.keys()
miny = min([c[1] for c in all_coords])
ybuffer = max(abs(miny) * 0.1, 0.1)
plt.xlim((-0.1, 1.1))
plt.ylim((miny - ybuffer, ybuffer))
center = (0.5, miny / 2)
plt.xlabel("Fraction", fontsize=28, fontweight='bold')
plt.ylabel("Formation energy (eV/fu)", fontsize=28,
fontweight='bold')
for coords in sorted(labels.keys(), key=lambda x: -x[1]):
entry = labels[coords]
label = entry.name
# The following defines an offset for the annotation text emanating
# from the center of the PD. Results in fairly nice layouts for the
# most part.
vec = (np.array(coords) - center)
vec = vec / np.linalg.norm(vec) * 10 if np.linalg.norm(vec) != 0 \
else vec
valign = "bottom" if vec[1] > 0 else "top"
if vec[0] < -0.01:
halign = "right"
elif vec[0] > 0.01:
halign = "left"
else:
halign = "center"
if label_stable:
if process_attributes and entry.attribute == 'new':
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign,
verticalalignment=valign,
fontproperties=font,
color='g')
else:
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign,
verticalalignment=valign,
fontproperties=font)
if self.show_unstable:
font = FontProperties()
font.set_size(16)
energies_unstable = [self._pd.get_e_above_hull(entry)
for entry, coord in unstable.items()]
if energy_colormap is not None:
energies.extend(energies_unstable)
vals_unstable = _map.to_rgba(energies_unstable)
ii = 0
for entry, coords in unstable.items():
ehull = self._pd.get_e_above_hull(entry)
if ehull < self.show_unstable:
vec = (np.array(coords) - center)
vec = vec / np.linalg.norm(vec) * 10 \
if np.linalg.norm(vec) != 0 else vec
label = entry.name
if energy_colormap is None:
plt.plot(coords[0], coords[1], "ks", linewidth=3,
markeredgecolor="k", markerfacecolor="r",
markersize=8)
else:
plt.plot(coords[0], coords[1], "s", linewidth=3,
markeredgecolor="k",
markerfacecolor=vals_unstable[ii],
markersize=8)
if label_unstable:
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign, color="b",
verticalalignment=valign,
fontproperties=font)
ii += 1
if energy_colormap is not None and show_colorbar:
_map.set_array(energies)
cbar = plt.colorbar(_map)
cbar.set_label(
'Energy [meV/at] above hull (in red)\nInverse energy ['
'meV/at] above hull (in green)',
rotation=-90, ha='left', va='center')
f = plt.gcf()
f.set_size_inches((8, 6))
plt.subplots_adjust(left=0.09, right=0.98, top=0.98, bottom=0.07)
return plt
def _get_3d_plot(self, label_stable=True):
"""
Shows the plot using pylab. Usually I won't do imports in methods,
but since matplotlib is a fairly expensive library to load and not all
machines have it installed, I have done it this way.
"""
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib.font_manager import FontProperties
fig = plt.figure()
ax = p3.Axes3D(fig)
font = FontProperties()
font.set_weight("bold")
font.set_size(20)
(lines, labels, unstable) = self.pd_plot_data
count = 1
newlabels = list()
for x, y, z in lines:
ax.plot(x, y, z, "bo-", linewidth=3, markeredgecolor="b",
markerfacecolor="r", markersize=10)
for coords in sorted(labels.keys()):
entry = labels[coords]
label = entry.name
if label_stable:
if len(entry.composition.elements) == 1:
ax.text(coords[0], coords[1], coords[2], label)
else:
ax.text(coords[0], coords[1], coords[2], str(count))
newlabels.append("{} : {}".format(count, latexify(label)))
count += 1
plt.figtext(0.01, 0.01, "\n".join(newlabels))
ax.axis("off")
return plt
def write_image(self, stream, image_format="svg", **kwargs):
r"""
Writes the phase diagram to an image in a stream.
Args:
stream:
stream to write to. Can be a file stream or a StringIO stream.
image_format:
format for image. Can be any of the matplotlib supported formats.
Defaults to svg for best results for vector graphics.
**kwargs: Pass through to get_plot function.
"""
plt = self.get_plot(**kwargs)
f = plt.gcf()
f.set_size_inches((12, 10))
plt.savefig(stream, format=image_format)
def plot_chempot_range_map(self, elements, referenced=True):
"""
Plot the chemical potential range map. Currently works only for
3-component PDs.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges of
all Li-Co-O phases with respect to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: if True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
"""
self.get_chempot_range_map_plot(elements, referenced=referenced).show()
def get_chempot_range_map_plot(self, elements, referenced=True):
"""
Returns a plot of the chemical potential range map. Currently works
only for 3-component PDs.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges of
all Li-Co-O phases with respect to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: if True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
Returns:
A matplotlib plot object.
"""
plt = pretty_plot(12, 8)
chempot_ranges = self._pd.get_chempot_range_map(
elements, referenced=referenced)
missing_lines = {}
excluded_region = []
for entry, lines in chempot_ranges.items():
comp = entry.composition
center_x = 0
center_y = 0
coords = []
contain_zero = any([comp.get_atomic_fraction(el) == 0
for el in elements])
is_boundary = (not contain_zero) and sum([comp.get_atomic_fraction(el) for el in elements]) == 1
for line in lines:
(x, y) = line.coords.transpose()
plt.plot(x, y, "k-")
for coord in line.coords:
if not in_coord_list(coords, coord):
coords.append(coord.tolist())
center_x += coord[0]
center_y += coord[1]
if is_boundary:
excluded_region.extend(line.coords)
if coords and contain_zero:
missing_lines[entry] = coords
else:
xy = (center_x / len(coords), center_y / len(coords))
plt.annotate(latexify(entry.name), xy, fontsize=22)
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Shade the forbidden chemical potential regions.
excluded_region.append([xlim[1], ylim[1]])
excluded_region = sorted(excluded_region, key=lambda c: c[0])
(x, y) = np.transpose(excluded_region)
plt.fill(x, y, "0.80")
# The hull does not generate the missing horizontal and vertical lines.
# The following code fixes this.
el0 = elements[0]
el1 = elements[1]
for entry, coords in missing_lines.items():
center_x = sum([c[0] for c in coords])
center_y = sum([c[1] for c in coords])
comp = entry.composition
is_x = comp.get_atomic_fraction(el0) < 0.01
is_y = comp.get_atomic_fraction(el1) < 0.01
n = len(coords)
if not (is_x and is_y):
if is_x:
coords = sorted(coords, key=lambda c: c[1])
for i in [0, -1]:
x = [min(xlim), coords[i][0]]
y = [coords[i][1], coords[i][1]]
plt.plot(x, y, "k")
center_x += min(xlim)
center_y += coords[i][1]
elif is_y:
coords = sorted(coords, key=lambda c: c[0])
for i in [0, -1]:
x = [coords[i][0], coords[i][0]]
y = [coords[i][1], min(ylim)]
plt.plot(x, y, "k")
center_x += coords[i][0]
center_y += min(ylim)
xy = (center_x / (n + 2), center_y / (n + 2))
else:
center_x = sum(coord[0] for coord in coords) + xlim[0]
center_y = sum(coord[1] for coord in coords) + ylim[0]
xy = (center_x / (n + 1), center_y / (n + 1))
plt.annotate(latexify(entry.name), xy,
horizontalalignment="center",
verticalalignment="center", fontsize=22)
plt.xlabel("$\\mu_{{{0}}} - \\mu_{{{0}}}^0$ (eV)"
.format(el0.symbol))
plt.ylabel("$\\mu_{{{0}}} - \\mu_{{{0}}}^0$ (eV)"
.format(el1.symbol))
plt.tight_layout()
return plt
def get_contour_pd_plot(self):
"""
Plot a contour phase diagram plot, where phase triangles are colored
according to degree of instability by interpolation. Currently only
works for 3-component phase diagrams.
Returns:
A matplotlib plot object.
"""
from scipy import interpolate
from matplotlib import cm
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
plt = self._get_2d_plot()
data[:, 0:2] = triangular_coord(data[:, 0:2]).transpose()
for i, e in enumerate(entries):
data[i, 2] = self._pd.get_e_above_hull(e)
gridsize = 0.005
xnew = np.arange(0, 1., gridsize)
ynew = np.arange(0, 1, gridsize)
f = interpolate.LinearNDInterpolator(data[:, 0:2], data[:, 2])
znew = np.zeros((len(ynew), len(xnew)))
for (i, xval) in enumerate(xnew):
for (j, yval) in enumerate(ynew):
znew[j, i] = f(xval, yval)
plt.contourf(xnew, ynew, znew, 1000, cmap=cm.autumn_r)
plt.colorbar()
return plt
def uniquelines(q):
"""
Given all the facets, convert them into a set of unique lines. Specifically
used for converting convex hull facets into line pairs of coordinates.
Args:
q: A 2-dim sequence, where each row represents a facet. E.g.,
[[1,2,3],[3,6,7],...]
Returns:
setoflines:
A set of tuples of lines. E.g., ((1,2), (1,3), (2,3), ....)
"""
setoflines = set()
for facets in q:
for line in itertools.combinations(facets, 2):
setoflines.add(tuple(sorted(line)))
return setoflines
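# Worked example for uniquelines: two triangular facets that share the edge
# (2, 3) yield five unique edges rather than six:
#
#     uniquelines([[1, 2, 3], [2, 3, 4]])
#     # -> {(1, 2), (1, 3), (2, 3), (2, 4), (3, 4)}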
def triangular_coord(coord):
"""
Convert a 2D coordinate into a triangle-based coordinate system for a
prettier phase diagram.
Args:
coord: coordinate used in the convex hull computation.
Returns:
coordinates in a triangular-based coordinate system.
"""
unitvec = np.array([[1, 0], [0.5, math.sqrt(3) / 2]])
result = np.dot(np.array(coord), unitvec)
return result.transpose()
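# Worked example for triangular_coord: the three corners of composition space
# map onto the corners of an equilateral triangle:
#
#     triangular_coord([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
#     # columns -> (0, 0), (1, 0) and (0.5, sqrt(3)/2)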
def tet_coord(coord):
"""
Convert a 3D coordinate into a tetrahedron based coordinate system for a
prettier phase diagram.
Args:
coord: coordinate used in the convex hull computation.
Returns:
coordinates in a tetrahedron-based coordinate system.
"""
unitvec = np.array([[1, 0, 0], [0.5, math.sqrt(3) / 2, 0],
[0.5, 1.0 / 3.0 * math.sqrt(3) / 2, math.sqrt(6) / 3]])
result = np.dot(np.array(coord), unitvec)
return result.transpose()
def order_phase_diagram(lines, stable_entries, unstable_entries, ordering):
"""
Orders the entries (their coordinates) in a phase diagram plot according
to the user specified ordering.
Ordering should be given as ['Up', 'Left', 'Right'], where Up,
Left and Right are the names of the entries in the upper, left and right
corners of the triangle respectively.
Args:
lines: list of list of coordinates for lines in the PD.
stable_entries: {coordinate : entry} for each stable node in the
phase diagram. (Each coordinate can only have one stable phase)
unstable_entries: {entry: coordinates} for all unstable nodes in the
phase diagram.
ordering: Ordering of the phase diagram, given as a list ['Up',
'Left','Right']
Returns:
(newlines, newstable_entries, newunstable_entries):
- newlines is a list of list of coordinates for lines in the PD.
- newstable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- newunstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
"""
yup = -1000.0
xleft = 1000.0
xright = -1000.0
for coord in stable_entries:
if coord[0] > xright:
xright = coord[0]
nameright = stable_entries[coord].name
if coord[0] < xleft:
xleft = coord[0]
nameleft = stable_entries[coord].name
if coord[1] > yup:
yup = coord[1]
nameup = stable_entries[coord].name
if (nameup not in ordering) or (nameright not in ordering) or (nameleft not in ordering):
raise ValueError(
'Error in ordering_phase_diagram : \n"{up}", "{left}" and "{'
'right}"'
' should be in ordering : {ord}'.format(up=nameup, left=nameleft,
right=nameright,
ord=ordering))
cc = np.array([0.5, np.sqrt(3.0) / 6.0], np.float)
if nameup == ordering[0]:
if nameleft == ordering[1]:
# The coordinates were already in the user ordering
return lines, stable_entries, unstable_entries
else:
newlines = [[np.array(1.0 - x), y] for x, y in lines]
newstable_entries = {(1.0 - c[0], c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {entry: (1.0 - c[0], c[1])
for entry, c in
unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
elif nameup == ordering[1]:
if nameleft == ordering[2]:
c120 = np.cos(2.0 * np.pi / 3.0)
s120 = np.sin(2.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = c120 * (xx - cc[0]) - s120 * (y[ii] - cc[1]) + cc[0]
newy[ii] = s120 * (xx - cc[0]) + c120 * (y[ii] - cc[1]) + cc[1]
newlines.append([newx, newy])
newstable_entries = {
(c120 * (c[0] - cc[0]) - s120 * (c[1] - cc[1]) + cc[0],
s120 * (c[0] - cc[0]) + c120 * (c[1] - cc[1]) + cc[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (c120 * (c[0] - cc[0]) - s120 * (c[1] - cc[1]) + cc[0],
s120 * (c[0] - cc[0]) + c120 * (c[1] - cc[1]) + cc[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
else:
c120 = np.cos(2.0 * np.pi / 3.0)
s120 = np.sin(2.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = -c120 * (xx - 1.0) - s120 * y[ii] + 1.0
newy[ii] = -s120 * (xx - 1.0) + c120 * y[ii]
newlines.append([newx, newy])
newstable_entries = {(-c120 * (c[0] - 1.0) - s120 * c[1] + 1.0,
-s120 * (c[0] - 1.0) + c120 * c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (-c120 * (c[0] - 1.0) - s120 * c[1] + 1.0,
-s120 * (c[0] - 1.0) + c120 * c[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
elif nameup == ordering[2]:
if nameleft == ordering[0]:
c240 = np.cos(4.0 * np.pi / 3.0)
s240 = np.sin(4.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = c240 * (xx - cc[0]) - s240 * (y[ii] - cc[1]) + cc[0]
newy[ii] = s240 * (xx - cc[0]) + c240 * (y[ii] - cc[1]) + cc[1]
newlines.append([newx, newy])
newstable_entries = {
(c240 * (c[0] - cc[0]) - s240 * (c[1] - cc[1]) + cc[0],
s240 * (c[0] - cc[0]) + c240 * (c[1] - cc[1]) + cc[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (c240 * (c[0] - cc[0]) - s240 * (c[1] - cc[1]) + cc[0],
s240 * (c[0] - cc[0]) + c240 * (c[1] - cc[1]) + cc[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
else:
c240 = np.cos(4.0 * np.pi / 3.0)
s240 = np.sin(4.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = -c240 * xx - s240 * y[ii]
newy[ii] = -s240 * xx + c240 * y[ii]
newlines.append([newx, newy])
newstable_entries = {(-c240 * c[0] - s240 * c[1],
-s240 * c[0] + c240 * c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {entry: (-c240 * c[0] - s240 * c[1],
-s240 * c[0] + c240 * c[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
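# A usage sketch for order_phase_diagram, assuming (lines, stable, unstable)
# came from PDPlotter.pd_plot_data for a ternary system whose corner entries
# are named "Li", "Fe" and "O" (placeholder names):
#
#     new_lines, new_stable, new_unstable = order_phase_diagram(
#         lines, stable, unstable, ordering=["O", "Li", "Fe"])
#     # "O" ends up at the top corner, "Li" at the left, "Fe" at the right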
|
gVallverdu/pymatgen
|
pymatgen/analysis/phase_diagram.py
|
Python
|
mit
| 84,336
|
[
"pymatgen"
] |
d595e60fa9e70862449fec485201669bc9773bb4e8ff576ecd6259451ea84444
|
#!/usr/bin/env python
# coding: utf-8
from datetime import datetime
from distutils import spawn
import argparse
import json
import os
import platform
import shutil
import socket
import sys
import time
import urllib
import urllib2
import main
from main import config
###############################################################################
# Options
###############################################################################
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
'-w', '--watch', dest='watch', action='store_true',
help='watch files for changes when running the development web server',
)
PARSER.add_argument(
'-c', '--clean', dest='clean', action='store_true',
help='recompiles files when running the development web server',
)
PARSER.add_argument(
'-C', '--clean-all', dest='clean_all', action='store_true',
help='''Cleans all the pip, Node & Bower related tools / libraries and
updates them to their latest versions''',
)
PARSER.add_argument(
'-m', '--minify', dest='minify', action='store_true',
help='compiles files into minified version before deploying'
)
PARSER.add_argument(
'-s', '--start', dest='start', action='store_true',
help='starts the dev_appserver.py with storage_path pointing to temp',
)
PARSER.add_argument(
'-o', '--host', dest='host', action='store', default='127.0.0.1',
help='the host to start the dev_appserver.py',
)
PARSER.add_argument(
'-p', '--port', dest='port', action='store', default='8080',
help='the port to start the dev_appserver.py',
)
PARSER.add_argument(
'-f', '--flush', dest='flush', action='store_true',
help='clears the datastore, blobstore, etc',
)
PARSER.add_argument(
'--appserver-args', dest='args', nargs=argparse.REMAINDER, default=[],
help='all following args are passed to dev_appserver.py',
)
ARGS = PARSER.parse_args()
###############################################################################
# Globals
###############################################################################
BAD_ENDINGS = ['pyc', 'pyo', '~']
GAE_PATH = ''
IS_WINDOWS = platform.system() == 'Windows'
###############################################################################
# Directories
###############################################################################
DIR_BOWER_COMPONENTS = 'bower_components'
DIR_MAIN = 'main'
DIR_NODE_MODULES = 'node_modules'
DIR_STYLE = 'style'
DIR_SCRIPT = 'script'
DIR_TEMP = 'temp'
DIR_VENV = os.path.join(DIR_TEMP, 'venv')
DIR_STATIC = os.path.join(DIR_MAIN, 'static')
DIR_SRC = os.path.join(DIR_STATIC, 'src')
DIR_SRC_SCRIPT = os.path.join(DIR_SRC, DIR_SCRIPT)
DIR_SRC_STYLE = os.path.join(DIR_SRC, DIR_STYLE)
DIR_DST = os.path.join(DIR_STATIC, 'dst')
DIR_DST_STYLE = os.path.join(DIR_DST, DIR_STYLE)
DIR_DST_SCRIPT = os.path.join(DIR_DST, DIR_SCRIPT)
DIR_EXT = os.path.join(DIR_STATIC, 'ext')
DIR_MIN = os.path.join(DIR_STATIC, 'min')
DIR_MIN_STYLE = os.path.join(DIR_MIN, DIR_STYLE)
DIR_MIN_SCRIPT = os.path.join(DIR_MIN, DIR_SCRIPT)
DIR_LIB = os.path.join(DIR_MAIN, 'lib')
DIR_LIBX = os.path.join(DIR_MAIN, 'libx')
FILE_LIB = '%s.zip' % DIR_LIB
FILE_REQUIREMENTS = 'requirements.txt'
FILE_BOWER = 'bower.json'
FILE_PACKAGE = 'package.json'
FILE_PIP_GUARD = os.path.join(DIR_TEMP, 'pip.guard')
FILE_NPM_GUARD = os.path.join(DIR_TEMP, 'npm.guard')
FILE_BOWER_GUARD = os.path.join(DIR_TEMP, 'bower.guard')
DIR_BIN = os.path.join(DIR_NODE_MODULES, '.bin')
FILE_COFFEE = os.path.join(DIR_BIN, 'coffee')
FILE_GULP = os.path.join(DIR_BIN, 'gulp')
FILE_LESS = os.path.join(DIR_BIN, 'lessc')
FILE_UGLIFYJS = os.path.join(DIR_BIN, 'uglifyjs')
FILE_VENV = os.path.join(DIR_VENV, 'Scripts', 'activate.bat') \
if IS_WINDOWS \
else os.path.join(DIR_VENV, 'bin', 'activate')
DIR_STORAGE = os.path.join(DIR_TEMP, 'storage')
FILE_UPDATE = os.path.join(DIR_TEMP, 'update.json')
###############################################################################
# Other global variables
###############################################################################
CORE_VERSION_URL = 'https://gae-init.appspot.com/_s/version/'
INTERNET_TEST_URL = 'http://74.125.228.100'
REQUIREMENTS_URL = 'http://docs.gae-init.appspot.com/requirement/'
###############################################################################
# Helpers
###############################################################################
def print_out(script, filename=''):
timestamp = datetime.now().strftime('%H:%M:%S')
if not filename:
filename = '-' * 46
script = script.rjust(12, '-')
print '[%s] %12s %s' % (timestamp, script, filename)
def make_dirs(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def remove_file_dir(file_dir):
if isinstance(file_dir, list) or isinstance(file_dir, tuple):
for file_ in file_dir:
remove_file_dir(file_)
else:
if not os.path.exists(file_dir):
return
if os.path.isdir(file_dir):
shutil.rmtree(file_dir, ignore_errors=True)
else:
os.remove(file_dir)
def clean_files(bad_endings=BAD_ENDINGS, in_dir='.'):
print_out(
'CLEAN FILES',
'Removing files: %s' % ', '.join(['*%s' % e for e in bad_endings]),
)
for root, _, files in os.walk(in_dir):
for filename in files:
for bad_ending in bad_endings:
if filename.endswith(bad_ending):
remove_file_dir(os.path.join(root, filename))
def merge_files(source, target):
fout = open(target, 'a')
for line in open(source):
fout.write(line)
fout.close()
def os_execute(executable, args, source, target, append=False):
operator = '>>' if append else '>'
os.system('%s %s %s %s %s' % (executable, args, source, operator, target))
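# os_execute just shells out and redirects stdout; for instance, the call below
# would run "<lessc> -x src/less/base.less > min/style/base.min.css"
# (paths are illustrative only):
#
#     os_execute(FILE_LESS, '-x', 'src/less/base.less', 'min/style/base.min.css')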
def compile_script(source, target_dir):
if not os.path.isfile(source):
print_out('NOT FOUND', source)
return
target = source.replace(DIR_SRC_SCRIPT, target_dir).replace('.coffee', '.js')
if not is_dirty(source, target):
return
make_dirs(os.path.dirname(target))
if not source.endswith('.coffee'):
print_out('COPYING', source)
shutil.copy(source, target)
return
print_out('COFFEE', source)
os_execute(FILE_COFFEE, '-cp', source, target)
def compile_style(source, target_dir, check_modified=False):
if not os.path.isfile(source):
print_out('NOT FOUND', source)
return
if not source.endswith('.less'):
return
target = source.replace(DIR_SRC_STYLE, target_dir).replace('.less', '.css')
if check_modified and not is_style_modified(target):
return
minified = ''
if target_dir == DIR_MIN_STYLE:
minified = '-x'
target = target.replace('.css', '.min.css')
print_out('LESS MIN', source)
else:
print_out('LESS', source)
make_dirs(os.path.dirname(target))
os_execute(FILE_LESS, minified, source, target)
def make_lib_zip(force=False):
if force and os.path.isfile(FILE_LIB):
remove_file_dir(FILE_LIB)
if not os.path.isfile(FILE_LIB):
if os.path.exists(DIR_LIB):
print_out('ZIP', FILE_LIB)
shutil.make_archive(DIR_LIB, 'zip', DIR_LIB)
else:
print_out('NOT FOUND', DIR_LIB)
def is_dirty(source, target):
if not os.access(target, os.O_RDONLY):
return True
return os.stat(source).st_mtime - os.stat(target).st_mtime > 0
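# is_dirty treats a missing or out-of-date target as needing a rebuild, e.g.:
#
#     is_dirty('main/static/src/script/app.coffee', 'main/static/dst/script/app.js')
#     # -> True when app.js is missing or older than app.coffee (paths illustrative)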
def is_style_modified(target):
for root, _, files in os.walk(DIR_SRC):
for filename in files:
path = os.path.join(root, filename)
if path.endswith('.less') and is_dirty(path, target):
return True
return False
def compile_all_dst():
for source in config.STYLES:
compile_style(os.path.join(DIR_STATIC, source), DIR_DST_STYLE, True)
for _, scripts in config.SCRIPTS:
for source in scripts:
compile_script(os.path.join(DIR_STATIC, source), DIR_DST_SCRIPT)
def update_path_separators():
def fixit(path):
return path.replace('\\', '/').replace('/', os.sep)
for idx in xrange(len(config.STYLES)):
config.STYLES[idx] = fixit(config.STYLES[idx])
for _, scripts in config.SCRIPTS:
for idx in xrange(len(scripts)):
scripts[idx] = fixit(scripts[idx])
def listdir(directory, split_ext=False):
try:
if split_ext:
return [os.path.splitext(dir_)[0] for dir_ in os.listdir(directory)]
else:
return os.listdir(directory)
except OSError:
return []
def site_packages_path():
if IS_WINDOWS:
return os.path.join(DIR_VENV, 'Lib', 'site-packages')
py_version = 'python%s.%s' % sys.version_info[:2]
return os.path.join(DIR_VENV, 'lib', py_version, 'site-packages')
def create_virtualenv():
if not os.path.exists(FILE_VENV):
os.system('virtualenv --no-site-packages %s' % DIR_VENV)
os.system('echo %s >> %s' % (
'set PYTHONPATH=' if IS_WINDOWS else 'unset PYTHONPATH', FILE_VENV
))
pth_file = os.path.join(site_packages_path(), 'gae.pth')
echo_to = 'echo %s >> {pth}'.format(pth=pth_file)
os.system(echo_to % find_gae_path())
os.system(echo_to % os.path.abspath(DIR_LIBX))
fix_path_cmd = 'import dev_appserver; dev_appserver.fix_sys_path()'
os.system(echo_to % (
fix_path_cmd if IS_WINDOWS else '"%s"' % fix_path_cmd
))
return True
def exec_pip_commands(command):
script = []
if create_virtualenv():
activate_cmd = 'call %s' if IS_WINDOWS else 'source %s'
activate_cmd %= FILE_VENV
script.append(activate_cmd)
script.append('echo %s' % command)
script.append(command)
script = '&'.join(script) if IS_WINDOWS else \
'/bin/bash -c "%s"' % ';'.join(script)
os.system(script)
def make_guard(fname, cmd, spec):
with open(fname, 'w') as guard:
guard.write('Prevents %s execution if newer than %s' % (cmd, spec))
def guard_is_newer(guard, watched):
if os.path.exists(guard):
return os.path.getmtime(guard) > os.path.getmtime(watched)
return False
def check_if_pip_should_run():
return not guard_is_newer(FILE_PIP_GUARD, FILE_REQUIREMENTS)
def check_if_npm_should_run():
return not guard_is_newer(FILE_NPM_GUARD, FILE_PACKAGE)
def check_if_bower_should_run():
return not guard_is_newer(FILE_BOWER_GUARD, FILE_BOWER)
def install_py_libs():
if not check_if_pip_should_run():
return
exec_pip_commands('pip install -q -r %s' % FILE_REQUIREMENTS)
exclude_ext = ['.pth', '.pyc', '.egg-info', '.dist-info']
exclude_prefix = ['setuptools-', 'pip-', 'Pillow-']
exclude = [
'test', 'tests', 'pip', 'setuptools', '_markerlib', 'PIL',
'easy_install.py', 'pkg_resources.py'
]
def _exclude_prefix(pkg):
for prefix in exclude_prefix:
if pkg.startswith(prefix):
return True
return False
def _exclude_ext(pkg):
for ext in exclude_ext:
if pkg.endswith(ext):
return True
return False
def _get_dest(pkg):
make_dirs(DIR_LIB)
return os.path.join(DIR_LIB, pkg)
site_packages = site_packages_path()
dir_libs = listdir(DIR_LIB)
dir_libs.extend(listdir(DIR_LIBX))
for dir_ in listdir(site_packages):
if dir_ in dir_libs or dir_ in exclude:
continue
if _exclude_prefix(dir_) or _exclude_ext(dir_):
continue
src_path = os.path.join(site_packages, dir_)
copy = shutil.copy if os.path.isfile(src_path) else shutil.copytree
copy(src_path, _get_dest(dir_))
make_guard(FILE_PIP_GUARD, 'pip', FILE_REQUIREMENTS)
def clean_py_libs():
remove_file_dir([DIR_LIB, DIR_VENV])
def install_dependencies():
make_dirs(DIR_TEMP)
if check_if_npm_should_run():
make_guard(FILE_NPM_GUARD, 'npm', FILE_PACKAGE)
os.system('npm install')
if check_if_bower_should_run():
make_guard(FILE_BOWER_GUARD, 'bower', FILE_BOWER)
os.system('"%s" ext' % FILE_GULP)
install_py_libs()
def check_for_update():
if os.path.exists(FILE_UPDATE):
mtime = os.path.getmtime(FILE_UPDATE)
last = datetime.utcfromtimestamp(mtime).strftime('%Y-%m-%d')
today = datetime.utcnow().strftime('%Y-%m-%d')
if last == today:
return
try:
with open(FILE_UPDATE, 'a'):
os.utime(FILE_UPDATE, None)
request = urllib2.Request(
CORE_VERSION_URL,
urllib.urlencode({'version': main.__version__}),
)
response = urllib2.urlopen(request)
with open(FILE_UPDATE, 'w') as update_json:
update_json.write(response.read())
except (urllib2.HTTPError, urllib2.URLError):
pass
def print_out_update():
try:
import pip
SemVer = pip.util.version.SemanticVersion
except AttributeError:
import pip._vendor.distlib.version
SemVer = pip._vendor.distlib.version.SemanticVersion
try:
with open(FILE_UPDATE, 'r') as update_json:
data = json.load(update_json)
if SemVer(main.__version__) < SemVer(data['version']):
print_out('UPDATE')
print_out(data['version'], 'Latest version of gae-init')
print_out(main.__version__, 'Your version is a bit behind')
print_out('CHANGESET', data['changeset'])
except (ValueError, KeyError):
os.remove(FILE_UPDATE)
except IOError:
pass
def update_missing_args():
if ARGS.start or ARGS.clean_all:
ARGS.clean = True
def uniq(seq):
seen = set()
return [e for e in seq if e not in seen and not seen.add(e)]
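# uniq keeps the first occurrence of each item while preserving order, e.g.:
#
#     uniq(['a.js', 'b.js', 'a.js', 'c.js'])  # -> ['a.js', 'b.js', 'c.js']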
###############################################################################
# Doctor
###############################################################################
def internet_on():
try:
urllib2.urlopen(INTERNET_TEST_URL, timeout=2)
return True
except (urllib2.URLError, socket.timeout):
return False
def check_requirement(check_func):
result, name, help_url_id = check_func()
if not result:
print_out('NOT FOUND', name)
if help_url_id:
print 'Please see %s%s' % (REQUIREMENTS_URL, help_url_id)
return False
return True
def find_gae_path():
global GAE_PATH
if GAE_PATH:
return GAE_PATH
if IS_WINDOWS:
gae_path = None
for path in os.environ['PATH'].split(os.pathsep):
if os.path.isfile(os.path.join(path, 'dev_appserver.py')):
gae_path = path
else:
gae_path = spawn.find_executable('dev_appserver.py')
if gae_path:
gae_path = os.path.dirname(os.path.realpath(gae_path))
if not gae_path:
return ''
gcloud_exec = 'gcloud.cmd' if IS_WINDOWS else 'gcloud'
if not os.path.isfile(os.path.join(gae_path, gcloud_exec)):
GAE_PATH = gae_path
else:
gae_path = os.path.join(gae_path, '..', 'platform', 'google_appengine')
if os.path.exists(gae_path):
GAE_PATH = os.path.realpath(gae_path)
return GAE_PATH
def fix_gcloud_gae_path():
gae_path = find_gae_path()
if os.path.exists(os.path.join(gae_path, '..', '..', 'bin', 'dev_appserver.py')):
return os.path.join(gae_path, '..', '..', 'bin')
return gae_path
def check_internet():
return internet_on(), 'Internet', ''
def check_gae():
return bool(find_gae_path()), 'Google App Engine SDK', '#gae'
def check_git():
return bool(spawn.find_executable('git')), 'Git', '#git'
def check_nodejs():
return bool(spawn.find_executable('node')), 'Node.js', '#nodejs'
def check_pip():
return bool(spawn.find_executable('pip')), 'pip', '#pip'
def check_virtualenv():
return bool(spawn.find_executable('virtualenv')), 'virtualenv', '#virtualenv'
def doctor_says_ok():
checkers = [check_gae, check_git, check_nodejs, check_pip, check_virtualenv]
if False in [check_requirement(check) for check in checkers]:
sys.exit(1)
return check_requirement(check_internet)
###############################################################################
# Main
###############################################################################
def run_clean():
print_out('CLEAN')
clean_files()
make_lib_zip(force=True)
if IS_WINDOWS:
clean_files(['css', 'js'], DIR_DST)
else:
remove_file_dir(DIR_DST)
make_dirs(DIR_DST)
compile_all_dst()
print_out('DONE')
def run_clean_all():
print_out('CLEAN ALL')
remove_file_dir([
DIR_BOWER_COMPONENTS, DIR_NODE_MODULES, DIR_EXT, DIR_MIN, DIR_DST
])
remove_file_dir([
FILE_PIP_GUARD, FILE_NPM_GUARD, FILE_BOWER_GUARD
])
clean_py_libs()
clean_files()
def run_minify():
print_out('MINIFY')
clean_files()
make_lib_zip(force=True)
remove_file_dir(DIR_MIN)
make_dirs(DIR_MIN_SCRIPT)
for source in config.STYLES:
compile_style(os.path.join(DIR_STATIC, source), DIR_MIN_STYLE)
cat, separator = ('type', ',') if IS_WINDOWS else ('cat', ' ')
for module, scripts in config.SCRIPTS:
scripts = uniq(scripts)
coffees = separator.join([
os.path.join(DIR_STATIC, script)
for script in scripts if script.endswith('.coffee')
])
pretty_js = os.path.join(DIR_MIN_SCRIPT, '%s.js' % module)
ugly_js = os.path.join(DIR_MIN_SCRIPT, '%s.min.js' % module)
print_out('COFFEE MIN', ugly_js)
if len(coffees):
os_execute(cat, coffees, ' | %s --compile --stdio' % FILE_COFFEE, pretty_js, append=True)
for script in scripts:
if not script.endswith('.js'):
continue
script_file = os.path.join(DIR_STATIC, script)
merge_files(script_file, pretty_js)
os_execute(FILE_UGLIFYJS, pretty_js, '-cm', ugly_js)
remove_file_dir(pretty_js)
print_out('DONE')
def run_watch():
print_out('WATCHING')
make_lib_zip()
make_dirs(DIR_DST)
compile_all_dst()
print_out('DONE', 'and watching for changes (Ctrl+C to stop)')
while True:
time.sleep(0.5)
reload(config)
update_path_separators()
compile_all_dst()
def run_flush():
remove_file_dir(DIR_STORAGE)
print_out('STORAGE CLEARED')
def run_start():
make_dirs(DIR_STORAGE)
clear = 'yes' if ARGS.flush else 'no'
port = int(ARGS.port)
base_cmd = 'python -u "%s"' if IS_WINDOWS else '"%s"'
run_command = ' '.join(map(str, [
base_cmd % os.path.join(fix_gcloud_gae_path(), 'dev_appserver.py'),
DIR_MAIN,
'--host %s' % ARGS.host,
'--port %s' % port,
'--admin_port %s' % (port + 1),
'--storage_path=%s' % DIR_STORAGE,
'--clear_datastore=%s' % clear,
'--skip_sdk_update_check',
] + ARGS.args))
os.system(run_command)
def run():
if len(sys.argv) == 1 or (ARGS.args and not ARGS.start):
PARSER.print_help()
sys.exit(1)
os.chdir(os.path.dirname(os.path.realpath(__file__)))
update_path_separators()
update_missing_args()
if ARGS.clean_all:
run_clean_all()
if doctor_says_ok():
install_dependencies()
check_for_update()
print_out_update()
if ARGS.clean:
run_clean()
if ARGS.minify:
run_minify()
if ARGS.watch:
run_watch()
if ARGS.flush:
run_flush()
if ARGS.start:
run_start()
if __name__ == '__main__':
run()
|
ssxenon01/contact-sharing
|
run.py
|
Python
|
mit
| 18,639
|
[
"GULP"
] |
d86e53c9f43c555f5c95dba8d9e086899aeb559a1bda0c30afaa509a7fd1ca23
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" Sets general pylada parameters. """
jobparams_readonly = False
""" Whether items can be modified in parallel using attribute syntax. """
jobparams_naked_end = True
""" Whether last item is returned as is or wrapped in ForwardingDict. """
jobparams_only_existing = True
""" Whether attributes can be added or only modified. """
unix_re = True
""" If True, then all regex matching is done using unix-command-line patterns. """
verbose_representation = True
""" Whether functional should be printed verbosely or not. """
ipython_verbose_representation = False
""" When in ipython, should we set :py:data:`verbose_representation` to False. """
global_root = '/'
""" Root of relative paths.
This can be set to an environment variable, say "$PYLADA", to make it easier to
transfer job-dictionaries from one computer to another. All file paths in
Pylada are then given with respect to this one. As long as the structure of
the disk is the same relative to this path, all Pylada paths will point to
equivalent objects.
"""
global_tmpdir = None
""" Global temporary directory for Pylada.
If None, defaults to the system tmp dir. However, two environment variables take
precedence: PBS_TMPDIR and PYLADA_TMPDIR.
"""
|
pylada/pylada-light
|
src/pylada/config/general.py
|
Python
|
gpl-3.0
| 2,387
|
[
"CRYSTAL",
"VASP"
] |
06657e9e708bd3851ad56fc7a86b74930caef24d6bf1a80d4623e3b6e8333716
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkMaskPoints(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkMaskPoints(), 'Processing.',
('vtkDataSet',), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkMaskPoints.py
|
Python
|
bsd-3-clause
| 480
|
[
"VTK"
] |
bc6fc89ae0569df7a1a680d95533192455bba3ce47a038ff737e3f4d1f97f047
|
import functools
import warnings
from collections import Mapping, Sequence
from numbers import Number
import numpy as np
import pandas as pd
from . import ops
from . import utils
from . import common
from . import groupby
from . import indexing
from . import alignment
from . import formatting
from .. import conventions
from .alignment import align, partial_align
from .coordinates import DatasetCoordinates, Indexes
from .common import ImplementsDatasetReduce, BaseDataObject
from .utils import (Frozen, SortedKeysDict, ChainMap, maybe_wrap_array)
from .variable import as_variable, Variable, Coordinate, broadcast_variables
from .pycompat import (iteritems, itervalues, basestring, OrderedDict,
dask_array_type)
from .combine import concat
# list of attributes of pd.DatetimeIndex that are ndarrays of time info
_DATETIMEINDEX_COMPONENTS = ['year', 'month', 'day', 'hour', 'minute',
'second', 'microsecond', 'nanosecond', 'date',
'time', 'dayofyear', 'weekofyear', 'dayofweek',
'quarter']
def _get_virtual_variable(variables, key):
"""Get a virtual variable (e.g., 'time.year') from a dict of xray.Variable
objects (if possible)
"""
if not isinstance(key, basestring):
raise KeyError(key)
split_key = key.split('.', 1)
if len(split_key) != 2:
raise KeyError(key)
ref_name, var_name = split_key
ref_var = variables[ref_name]
if ref_var.ndim == 1:
date = ref_var.to_index()
elif ref_var.ndim == 0:
date = pd.Timestamp(ref_var.values)
else:
raise KeyError(key)
if var_name == 'season':
# TODO: move 'season' into pandas itself
seasons = np.array(['DJF', 'MAM', 'JJA', 'SON'])
month = date.month
data = seasons[(month // 3) % 4]
else:
data = getattr(date, var_name)
return ref_name, var_name, Variable(ref_var.dims, data)
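# A minimal sketch of the virtual-variable lookup above (hypothetical data):
#
#     times = Coordinate('time', pd.date_range('2000-01-01', periods=4))
#     _get_virtual_variable({'time': times}, 'time.season')
#     # returns ('time', 'season', <Variable>) whose data is ['DJF', 'DJF', 'DJF', 'DJF']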
def _as_dataset_variable(name, var):
"""Prepare a variable for adding it to a Dataset
"""
try:
var = as_variable(var, key=name)
except TypeError:
raise TypeError('Dataset variables must be an array or a tuple of '
'the form (dims, data[, attrs, encoding])')
if name in var.dims:
# convert the variable into an index Coordinate
if var.ndim != 1:
raise ValueError('an index variable must be defined with '
'1-dimensional data')
var = var.to_coord()
return var
def _align_variables(variables, join='outer'):
"""Align all DataArrays in the provided dict, leaving other values alone.
"""
alignable = [k for k, v in variables.items() if hasattr(v, 'indexes')]
aligned = align(*[variables[a] for a in alignable],
join=join, copy=False)
new_variables = OrderedDict(variables)
new_variables.update(zip(alignable, aligned))
return new_variables
def _expand_variables(raw_variables, old_variables={}, compat='identical'):
"""Expand a dictionary of variables.
Returns a dictionary of Variable objects suitable for inserting into a
Dataset._variables dictionary.
This includes converting tuples (dims, data) into Variable objects,
converting coordinate variables into Coordinate objects and expanding
DataArray objects into Variables plus coordinates.
Raises ValueError if any conflicting values are found, between any of the
new or old variables.
"""
new_variables = OrderedDict()
new_coord_names = set()
variables = ChainMap(new_variables, old_variables)
def maybe_promote_or_replace(name, var):
existing_var = variables[name]
if name not in existing_var.dims:
if name in var.dims:
variables[name] = var
else:
common_dims = OrderedDict(zip(existing_var.dims,
existing_var.shape))
common_dims.update(zip(var.dims, var.shape))
variables[name] = existing_var.expand_dims(common_dims)
new_coord_names.update(var.dims)
def add_variable(name, var):
var = _as_dataset_variable(name, var)
if name not in variables:
variables[name] = var
new_coord_names.update(variables[name].dims)
else:
if not getattr(variables[name], compat)(var):
raise ValueError('conflicting value for variable %s:\n'
'first value: %r\nsecond value: %r'
% (name, variables[name], var))
if compat == 'broadcast_equals':
maybe_promote_or_replace(name, var)
for name, var in iteritems(raw_variables):
if hasattr(var, 'coords'):
# it's a DataArray
new_coord_names.update(var.coords)
for dim, coord in iteritems(var.coords):
if dim != name:
add_variable(dim, coord.variable)
var = var.variable
add_variable(name, var)
return new_variables, new_coord_names
def _calculate_dims(variables):
"""Calculate the dimensions corresponding to a set of variables.
Returns dictionary mapping from dimension names to sizes. Raises ValueError
if any of the dimension sizes conflict.
"""
dims = {}
last_used = {}
scalar_vars = set(k for k, v in iteritems(variables) if not v.dims)
for k, var in iteritems(variables):
for dim, size in zip(var.dims, var.shape):
if dim in scalar_vars:
raise ValueError('dimension %s already exists as a scalar '
'variable' % dim)
if dim not in dims:
dims[dim] = size
last_used[dim] = k
elif dims[dim] != size:
raise ValueError('conflicting sizes for dimension %r: '
'length %s on %r and length %s on %r' %
(dim, size, k, dims[dim], last_used[dim]))
return dims
def _merge_expand(aligned_self, other, overwrite_vars, compat):
possible_conflicts = dict((k, v) for k, v in aligned_self._variables.items()
if k not in overwrite_vars)
new_vars, new_coord_names = _expand_variables(other, possible_conflicts, compat)
replace_vars = aligned_self._variables.copy()
replace_vars.update(new_vars)
return replace_vars, new_vars, new_coord_names
def _merge_dataset(self, other, overwrite_vars, compat, join):
aligned_self, other = partial_align(self, other, join=join, copy=False)
replace_vars, new_vars, new_coord_names = _merge_expand(
aligned_self, other._variables, overwrite_vars, compat)
new_coord_names.update(other._coord_names)
return replace_vars, new_vars, new_coord_names
def _merge_dict(self, other, overwrite_vars, compat, join):
other = _align_variables(other, join='outer')
alignable = [k for k, v in other.items() if hasattr(v, 'indexes')]
aligned = partial_align(self, *[other[a] for a in alignable],
join=join, copy=False, exclude=overwrite_vars)
aligned_self = aligned[0]
other = OrderedDict(other)
other.update(zip(alignable, aligned[1:]))
return _merge_expand(aligned_self, other, overwrite_vars, compat)
def _assert_empty(args, msg='%s'):
if args:
raise ValueError(msg % args)
def as_dataset(obj):
"""Cast the given object to a Dataset.
Handles DataArrays, Datasets and dictionaries of variables. A new Dataset
object is only created in the last case.
"""
obj = getattr(obj, '_dataset', obj)
if not isinstance(obj, Dataset):
obj = Dataset(obj)
return obj
class Variables(Mapping):
def __init__(self, dataset):
self._dataset = dataset
def __iter__(self):
return (key for key in self._dataset._variables
if key not in self._dataset._coord_names)
def __len__(self):
return len(self._dataset._variables) - len(self._dataset._coord_names)
def __contains__(self, key):
return (key in self._dataset._variables
and key not in self._dataset._coord_names)
def __getitem__(self, key):
if key not in self._dataset._coord_names:
return self._dataset[key]
else:
raise KeyError(key)
def __repr__(self):
return formatting.vars_repr(self)
class _LocIndexer(object):
def __init__(self, dataset):
self.dataset = dataset
def __getitem__(self, key):
if not utils.is_dict_like(key):
raise TypeError('can only lookup dictionaries from Dataset.loc')
return self.dataset.sel(**key)
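# _LocIndexer lets label-based selection be written with dict-style keys; in
# xray the Dataset exposes it as the `.loc` property (`ds` below is hypothetical):
#
#     ds.loc[dict(time='2000-01-01')]   # equivalent to ds.sel(time='2000-01-01')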
class Dataset(Mapping, ImplementsDatasetReduce, BaseDataObject):
"""A multi-dimensional, in memory, array database.
A dataset resembles an in-memory representation of a NetCDF file, and
consists of variables, coordinates and attributes which together form a
self describing dataset.
Dataset implements the mapping interface with keys given by variable names
and values given by DataArray objects for each variable name.
One dimensional variables with name equal to their dimension are index
coordinates used for label based indexing.
"""
# class properties defined for the benefit of __setstate__, which otherwise
# runs into trouble because we overrode __getattr__
_attrs = None
_variables = Frozen({})
groupby_cls = groupby.DatasetGroupBy
def __init__(self, variables=None, coords=None, attrs=None,
compat='broadcast_equals'):
"""To load data from a file or file-like object, use the `open_dataset`
function.
Parameters
----------
variables : dict-like, optional
A mapping from variable names to :py:class:`~xray.DataArray`
objects, :py:class:`~xray.Variable` objects or tuples of the
form ``(dims, data[, attrs])`` which can be used as arguments to
create a new ``Variable``. Each dimension must have the same length
in all variables in which it appears.
coords : dict-like, optional
Another mapping in the same form as the `variables` argument,
except that each item is saved on the dataset as a "coordinate".
These variables have an associated meaning: they describe
constant/fixed/independent quantities, unlike the
varying/measured/dependent quantities that belong in `variables`.
Coordinate values may be given by 1-dimensional arrays or scalars,
in which case `dims` do not need to be supplied: 1D arrays will be
assumed to give index values along the dimension with the same
name.
attrs : dict-like, optional
Global attributes to save on this dataset.
compat : {'broadcast_equals', 'equals', 'identical'}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
"""
self._variables = OrderedDict()
self._coord_names = set()
self._dims = {}
self._attrs = None
self._file_obj = None
if variables is None:
variables = {}
if coords is None:
coords = set()
if variables or coords:
self._set_init_vars_and_dims(variables, coords, compat)
if attrs is not None:
self.attrs = attrs
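# A minimal construction sketch matching the docstring above (toy data):
#
#     ds = Dataset(
#         variables={'temperature': (('x',), [11.2, 12.5, 14.1])},
#         coords={'x': [10, 20, 30]},
#         attrs={'title': 'toy example'})
#     ds['temperature']   # -> DataArray indexed by the 'x' coordinate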
def _add_missing_coords_inplace(self):
"""Add missing coordinates to self._variables
"""
for dim, size in iteritems(self.dims):
if dim not in self._variables:
# This is equivalent to np.arange(size), but
# waits to create the array until it's actually accessed.
data = indexing.LazyIntegerRange(size)
coord = Coordinate(dim, data)
self._variables[dim] = coord
def _update_vars_and_coords(self, new_variables, new_coord_names={},
needs_copy=True, check_coord_names=True):
"""Add a dictionary of new variables to this dataset.
Raises a ValueError if any dimensions have conflicting lengths in the
new dataset. Otherwise will update this dataset's _variables and
_dims attributes in-place.
Set `needs_copy=False` only if this dataset is brand-new and hence
can be thrown away if this method fails.
"""
# default to creating another copy of variables so can unroll if we end
# up with inconsistent dimensions
variables = self._variables.copy() if needs_copy else self._variables
if check_coord_names:
_assert_empty([k for k in self.data_vars if k in new_coord_names],
'coordinates with these names already exist as '
'variables: %s')
variables.update(new_variables)
dims = _calculate_dims(variables)
# all checks are complete: it's safe to update
self._variables = variables
self._dims = dims
self._add_missing_coords_inplace()
self._coord_names.update(new_coord_names)
def _set_init_vars_and_dims(self, vars, coords, compat):
"""Set the initial value of Dataset variables and dimensions
"""
_assert_empty([k for k in vars if k in coords],
'redundant variables and coordinates: %s')
variables = ChainMap(vars, coords)
aligned = _align_variables(variables)
new_variables, new_coord_names = _expand_variables(aligned,
compat=compat)
new_coord_names.update(coords)
self._update_vars_and_coords(new_variables, new_coord_names,
needs_copy=False, check_coord_names=False)
@classmethod
def load_store(cls, store, decoder=None):
"""Create a new dataset from the contents of a backends.*DataStore
object
"""
variables, attributes = store.load()
if decoder:
variables, attributes = decoder(variables, attributes)
obj = cls(variables, attrs=attributes)
obj._file_obj = store
return obj
def close(self):
"""Close any files linked to this dataset
"""
if self._file_obj is not None:
self._file_obj.close()
self._file_obj = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __getstate__(self):
"""Always load data in-memory before pickling"""
self.load()
# self.__dict__ is the default pickle object, we don't need to
# implement our own __setstate__ method to make pickle work
state = self.__dict__.copy()
# throw away any references to datastores in the pickle
state['_file_obj'] = None
return state
@property
def variables(self):
"""Frozen dictionary of xray.Variable objects constituting this
dataset's data
"""
return Frozen(self._variables)
def _attrs_copy(self):
return None if self._attrs is None else OrderedDict(self._attrs)
@property
def attrs(self):
"""Dictionary of global attributes on this dataset
"""
if self._attrs is None:
self._attrs = OrderedDict()
return self._attrs
@attrs.setter
def attrs(self, value):
self._attrs = OrderedDict(value)
@property
def dims(self):
"""Mapping from dimension names to lengths.
This dictionary cannot be modified directly, but is updated when adding
new variables.
"""
return Frozen(SortedKeysDict(self._dims))
def load(self):
"""Manually trigger loading of this dataset's data from disk or a
remote source into memory and return this dataset.
Normally, it should not be necessary to call this method in user code,
because all xray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
"""
# access .data to coerce everything to numpy or dask arrays
all_data = dict((k, v.data) for k, v in self.variables.items())
lazy_data = dict((k, v) for k, v in all_data.items()
if isinstance(v, dask_array_type))
if lazy_data:
import dask.array as da
# evaluate all the dask arrays simultaneously
evaluated_data = da.compute(*lazy_data.values())
evaluated_variables = {}
for k, data in zip(lazy_data, evaluated_data):
self.variables[k].data = data
return self
def load_data(self): # pragma: no cover
warnings.warn('the Dataset method `load_data` has been deprecated; '
'use `load` instead',
FutureWarning, stacklevel=2)
return self.load()
@classmethod
def _construct_direct(cls, variables, coord_names, dims, attrs,
file_obj=None):
"""Shortcut around __init__ for internal use when we want to skip
costly validation
"""
obj = object.__new__(cls)
obj._variables = variables
obj._coord_names = coord_names
obj._dims = dims
obj._attrs = attrs
obj._file_obj = file_obj
return obj
__default_attrs = object()
def _replace_vars_and_dims(self, variables, coord_names=None,
attrs=__default_attrs, inplace=False):
"""Fastpath constructor for internal use.
Preserves coord names and attributes; dimensions are recalculated from
the supplied variables.
The arguments are *not* copied when placed on the new dataset. It is up
to the caller to ensure that they have the right type and are not used
elsewhere.
Parameters
----------
variables : OrderedDict
coord_names : set or None, optional
attrs : OrderedDict or None, optional
Returns
-------
new : Dataset
"""
dims = _calculate_dims(variables)
if inplace:
self._dims = dims
self._variables = variables
if coord_names is not None:
self._coord_names = coord_names
if attrs is not self.__default_attrs:
self._attrs = attrs
obj = self
else:
if coord_names is None:
coord_names = self._coord_names.copy()
if attrs is self.__default_attrs:
attrs = self._attrs_copy()
obj = self._construct_direct(variables, coord_names, dims, attrs)
return obj
def copy(self, deep=False):
"""Returns a copy of this dataset.
If `deep=True`, a deep copy is made of each of the component variables.
Otherwise, a shallow copy is made, so each variable in the new dataset
is also a variable in the original dataset.
"""
if deep:
variables = OrderedDict((k, v.copy(deep=True))
for k, v in iteritems(self._variables))
else:
variables = self._variables.copy()
# skip __init__ to avoid costly validation
return self._construct_direct(variables, self._coord_names.copy(),
self._dims.copy(), self._attrs_copy())
def _copy_listed(self, names, keep_attrs=True):
"""Create a new Dataset with the listed variables from this dataset and
all relevant coordinates. Skips all validation.
"""
variables = OrderedDict()
coord_names = set()
for name in names:
try:
variables[name] = self._variables[name]
except KeyError:
ref_name, var_name, var = _get_virtual_variable(
self._variables, name)
variables[var_name] = var
if ref_name in self._coord_names:
coord_names.add(var_name)
needed_dims = set()
for v in variables.values():
needed_dims.update(v._dims)
for k in self._coord_names:
if set(self._variables[k]._dims) <= needed_dims:
variables[k] = self._variables[k]
coord_names.add(k)
dims = dict((k, self._dims[k]) for k in needed_dims)
attrs = self.attrs.copy() if keep_attrs else None
return self._construct_direct(variables, coord_names, dims, attrs)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo=None):
# memo does nothing but is required for compatibility with
# copy.deepcopy
return self.copy(deep=True)
def __contains__(self, key):
"""The 'in' operator will return true or false depending on whether
'key' is an array in the dataset or not.
"""
return key in self._variables
def __len__(self):
return len(self._variables)
def __iter__(self):
return iter(self._variables)
@property
def nbytes(self):
return sum(v.nbytes for v in self.variables.values())
@property
def loc(self):
"""Attribute for location based indexing. Only supports __getitem__,
and only when the key is a dict of the form {dim: labels}.
"""
return _LocIndexer(self)
def __getitem__(self, key):
"""Access variables or coordinates this dataset as a
:py:class:`~xray.DataArray`.
Indexing with a list of names will return a new ``Dataset`` object.
"""
from .dataarray import DataArray
if utils.is_dict_like(key):
return self.isel(**key)
key = np.asarray(key)
if key.ndim == 0:
return DataArray._new_from_dataset(self, key.item())
else:
return self._copy_listed(key)
def __setitem__(self, key, value):
"""Add an array to this dataset.
If value is a `DataArray`, call its `select_vars()` method, rename it
to `key` and merge the contents of the resulting dataset into this
dataset.
If value is a `Variable` object (or tuple of form
``(dims, data[, attrs])``), add it to this dataset as a new
variable.
"""
if utils.is_dict_like(key):
raise NotImplementedError('cannot yet use a dictionary as a key '
'to set Dataset values')
self.update({key: value})
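# Dict-style access sketch (illustrative; 'ds' is a hypothetical Dataset):
#
#   >>> ds['temperature']                        # one variable -> DataArray
#   >>> ds[['temperature', 'x']]                 # list of names -> new Dataset
#   >>> ds['pressure'] = (('x',), [1000.2, 1001.7, 999.3])
#   >>> del ds['pressure']                       # remove the variable again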
def __delitem__(self, key):
"""Remove a variable from this dataset.
If this variable is a dimension, all variables containing this
dimension are also removed.
"""
def remove(k):
del self._variables[k]
self._coord_names.discard(k)
remove(key)
if key in self._dims:
del self._dims[key]
also_delete = [k for k, v in iteritems(self._variables)
if key in v.dims]
for key in also_delete:
remove(key)
# mutable objects should not be hashable
__hash__ = None
def _all_compat(self, other, compat_str):
"""Helper function for equals and identical"""
# some stores (e.g., scipy) do not seem to preserve order, so don't
# require matching order for equality
compat = lambda x, y: getattr(x, compat_str)(y)
return (self._coord_names == other._coord_names
and utils.dict_equiv(self._variables, other._variables,
compat=compat))
def broadcast_equals(self, other):
"""Two Datasets are broadcast equal if they are equal after
broadcasting all variables against each other.
For example, variables that are scalar in one dataset but non-scalar in
the other dataset can still be broadcast equal if the non-scalar
variable is a constant.
See Also
--------
Dataset.equals
Dataset.identical
"""
try:
return self._all_compat(other, 'broadcast_equals')
except (TypeError, AttributeError):
return False
def equals(self, other):
"""Two Datasets are equal if they have matching variables and
coordinates, all of which are equal.
Datasets can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for ``Dataset``
does element-wise comparisons (like numpy.ndarrays).
See Also
--------
Dataset.broadcast_equals
Dataset.identical
"""
try:
return self._all_compat(other, 'equals')
except (TypeError, AttributeError):
return False
def identical(self, other):
"""Like equals, but also checks all dataset attributes and the
attributes on all variables and coordinates.
See Also
--------
Dataset.broadcast_equals
Dataset.equals
"""
try:
return (utils.dict_equiv(self.attrs, other.attrs)
and self._all_compat(other, 'identical'))
except (TypeError, AttributeError):
return False
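# Comparison sketch (illustrative; 'a' and 'b' are hypothetical datasets):
#
#   >>> a.equals(b)             # same variables, dims and values (NaNs match)
#   >>> a.identical(b)          # as equals, plus all attributes must match
#   >>> a.broadcast_equals(b)   # equal once variables are broadcast together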
@property
def indexes(self):
"""OrderedDict of pandas.Index objects used for label based indexing
"""
return Indexes(self)
@property
def coords(self):
"""Dictionary of xray.DataArray objects corresponding to coordinate
variables
"""
return DatasetCoordinates(self)
@property
def data_vars(self):
"""Dictionary of xray.DataArray objects corresponding to data variables
"""
return Variables(self)
@property
def vars(self): # pragma: no cover
warnings.warn('the Dataset property `vars` has been deprecated; '
'use `data_vars` instead',
FutureWarning, stacklevel=2)
return self.data_vars
def set_coords(self, names, inplace=False):
"""Given names of one or more variables, set them as coordinates
Parameters
----------
names : str or list of str
Name(s) of variables in this dataset to convert into coordinates.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset
"""
# TODO: allow inserting new coordinates with this method, like
# DataFrame.set_index?
# nb. check in self._variables, not self.data_vars to ensure that the
# operation is idempotent
if isinstance(names, basestring):
names = [names]
self._assert_all_in_dataset(names)
obj = self if inplace else self.copy()
obj._coord_names.update(names)
return obj
def reset_coords(self, names=None, drop=False, inplace=False):
"""Given names of coordinates, reset them to become variables
Parameters
----------
names : str or list of str, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, optional
If True, remove coordinates instead of converting them into
variables.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset
"""
if names is None:
names = self._coord_names - set(self.dims)
else:
if isinstance(names, basestring):
names = [names]
self._assert_all_in_dataset(names)
_assert_empty(
set(names) & set(self.dims),
'cannot remove index coordinates with reset_coords: %s')
obj = self if inplace else self.copy()
obj._coord_names.difference_update(names)
if drop:
for name in names:
del obj._variables[name]
return obj
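# Coordinate promotion/demotion sketch (illustrative; names are hypothetical):
#
#   >>> ds = ds.set_coords('station_name')     # variable -> coordinate
#   >>> ds = ds.reset_coords('station_name')   # coordinate -> data variable
#   >>> ds.reset_coords(drop=True)              # discard all non-index coordinates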
def dump_to_store(self, store, encoder=None, sync=True):
"""Store dataset contents to a backends.*DataStore object."""
variables, attrs = conventions.encode_dataset_coordinates(self)
if encoder:
variables, attrs = encoder(variables, attrs)
store.store(variables, attrs)
if sync:
store.sync()
def to_netcdf(self, path=None, mode='w', format=None, group=None,
engine=None):
"""Write dataset contents to a netCDF file.
Parameters
----------
path : str, optional
Path to which to save this dataset. If no path is provided, this
function returns the resulting netCDF file as a bytes object; in
this case, we need to use scipy.io.netcdf, which does not support
netCDF version 4 (the default format becomes NETCDF3_64BIT).
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
this location will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', 'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
format='NETCDF4'). The group(s) will be created if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
"""
from ..backends.api import to_netcdf
return to_netcdf(self, path, mode, format, group, engine)
dump = utils.function_alias(to_netcdf, 'dump')
dumps = utils.function_alias(to_netcdf, 'dumps')
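# Serialization sketch (illustrative; the file names are hypothetical):
#
#   >>> ds.to_netcdf('output.nc')                 # NETCDF4 when netCDF4-python is available
#   >>> ds.to_netcdf('legacy.nc', format='NETCDF3_64BIT', engine='scipy')
#   >>> raw = ds.to_netcdf()                      # no path: returns the file as bytes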
def __repr__(self):
return formatting.dataset_repr(self)
@property
def chunks(self):
"""Block dimensions for this dataset's data or None if it's not a dask
array.
"""
chunks = {}
for v in self.variables.values():
if v.chunks is not None:
new_chunks = list(zip(v.dims, v.chunks))
if any(chunk != chunks[d] for d, chunk in new_chunks
if d in chunks):
raise ValueError('inconsistent chunks')
chunks.update(new_chunks)
return Frozen(SortedKeysDict(chunks))
def chunk(self, chunks=None, lock=False):
"""Coerce all arrays in this dataset into dask arrays with the given
chunks.
Non-dask arrays in this dataset will be converted to dask arrays. Dask
arrays will be rechunked to the given chunk sizes.
If chunks is not provided for one or more dimensions, chunk
sizes along those dimensions will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Parameters
----------
chunks : int or dict, optional
Chunk sizes along each dimension, e.g., ``5`` or
``{'x': 5, 'y': 5}``.
lock : optional
Passed on to :py:func:`dask.array.from_array`, if the array is not
already a dask array.
Returns
-------
chunked : xray.Dataset
"""
if isinstance(chunks, Number):
chunks = dict.fromkeys(self.dims, chunks)
if chunks is not None:
bad_dims = [d for d in chunks if d not in self.dims]
if bad_dims:
raise ValueError('some chunks keys are not dimensions on this '
'object: %s' % bad_dims)
def selkeys(dict_, keys):
if dict_ is None:
return None
return dict((d, dict_[d]) for d in keys if d in dict_)
def maybe_chunk(name, var, chunks):
chunks = selkeys(chunks, var.dims)
if not chunks:
chunks = None
if var.ndim > 0:
return var.chunk(chunks, name=name, lock=lock)
else:
return var
variables = OrderedDict([(k, maybe_chunk(k, v, chunks))
for k, v in self.variables.items()])
return self._replace_vars_and_dims(variables)
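# Chunking sketch (illustrative; assumes dask is installed and 'ds' exists):
#
#   >>> lazy = ds.chunk({'x': 2})    # per-dimension chunk sizes (or a single int)
#   >>> lazy.chunks                  # frozen mapping of dimension -> chunk sizes
#   >>> lazy.load()                  # evaluate the dask arrays back into memory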
def isel(self, **indexers):
"""Returns a new dataset with each array indexed along the specified
dimension(s).
This method selects values from each array using its `__getitem__`
method, except this method does not require knowing the order of
each array's dimensions.
Parameters
----------
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by integers, slice objects or arrays.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers. In
general, each array's data will be a view of the array's data
in this dataset, unless numpy fancy indexing was triggered by using
an array indexer, in which case the data will be a copy.
See Also
--------
Dataset.sel
DataArray.isel
DataArray.sel
"""
invalid = [k for k in indexers if k not in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
# all indexers should be int, slice or np.ndarrays
indexers = [(k, (np.asarray(v)
if not isinstance(v, (int, np.integer, slice))
else v))
for k, v in iteritems(indexers)]
variables = OrderedDict()
for name, var in iteritems(self._variables):
var_indexers = dict((k, v) for k, v in indexers if k in var.dims)
variables[name] = var.isel(**var_indexers)
return self._replace_vars_and_dims(variables)
def sel(self, method=None, **indexers):
"""Returns a new dataset with each array indexed by tick labels
along the specified dimension(s).
In contrast to `Dataset.isel`, indexers for this method should use
labels instead of integers.
Under the hood, this method is powered by using pandas' powerful Index
objects. This makes label-based indexing essentially just as fast as
using integer indexing.
It also means this method uses pandas's (well documented) logic for
indexing. This means you can use string shortcuts for datetime indexes
(e.g., '2000-01' to select all values in January 2000). It also means
that slices are treated as inclusive of both the start and stop values,
unlike normal Python indexing.
Parameters
----------
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for inexact matches (requires pandas>=0.16):
* default: only exact matches
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by scalars, slices or arrays of tick labels.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
variable and dimension is indexed by the appropriate indexers. In
general, each variable's data will be a view of the variable's data
in this dataset, unless numpy fancy indexing was triggered by using
an array indexer, in which case the data will be a copy.
See Also
--------
Dataset.isel
DataArray.isel
DataArray.sel
"""
return self.isel(**indexing.remap_label_indexers(self, indexers,
method=method))
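# Indexing sketch (illustrative; 'ds' and the 'time' dimension are hypothetical):
#
#   >>> ds.isel(x=0, time=slice(None, 10))   # integer positions and slices
#   >>> ds.sel(time='2000-01')               # labels, using pandas indexing rules
#   >>> ds.sel(x=19.7, method='nearest')     # inexact match (needs pandas >= 0.16)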
def isel_points(self, dim='points', **indexers):
"""Returns a new dataset with each array indexed pointwise along the
specified dimension(s).
This method selects pointwise values from each array and is akin to
the NumPy indexing behavior of `arr[[0, 1], [0, 1]]`, except this
method does not require knowing the order of each array's dimensions.
Parameters
----------
dim : str or DataArray or pandas.Index or other list-like object, optional
Name of the dimension to concatenate along. If dim is provided as a
string, it must be a new dimension name, in which case it is added
along axis=0. If dim is provided as a DataArray or Index or
list-like object, its name, which must not be present in the
dataset, is used as the dimension to concatenate along and the
values are added as a coordinate.
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by array-like objects. All indexers must be the same length and
1 dimensional.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers. With
pointwise indexing, the new Dataset will always be a copy of the
original.
See Also
--------
Dataset.sel
DataArray.isel
DataArray.sel
DataArray.isel_points
"""
indexer_dims = set(indexers)
def relevant_keys(mapping):
return [k for k, v in mapping.items()
if any(d in indexer_dims for d in v.dims)]
data_vars = relevant_keys(self.data_vars)
coords = relevant_keys(self.coords)
# all the indexers should be iterables
keys = indexers.keys()
indexers = [(k, np.asarray(v)) for k, v in iteritems(indexers)]
# Check that indexers are valid dims, integers, and 1D
for k, v in indexers:
if k not in self.dims:
raise ValueError("dimension %s does not exist" % k)
if v.dtype.kind != 'i':
raise TypeError('Indexers must be integers')
if v.ndim != 1:
raise ValueError('Indexers must be 1 dimensional')
# all the indexers should have the same length
lengths = set(len(v) for k, v in indexers)
if len(lengths) > 1:
raise ValueError('All indexers must be the same length')
# Existing dimensions are not valid choices for the dim argument
if isinstance(dim, basestring):
if dim in self.dims:
# dim is an invalid string
raise ValueError('Existing dimension names are not valid '
'choices for the dim argument in isel_points')
elif hasattr(dim, 'dims'):
# dim is a DataArray or Coordinate
if dim.name in self.dims:
# dim already exists
raise ValueError('Existing dimensions are not valid choices '
'for the dim argument in isel_points')
else:
# try to cast dim to DataArray with name = points
from .dataarray import DataArray
dim = DataArray(dim, dims='points', name='points')
# TODO: This would be sped up with vectorized indexing. This will
# require dask to support pointwise indexing as well.
return concat([self.isel(**d) for d in
[dict(zip(keys, inds)) for inds in
zip(*[v for k, v in indexers])]],
dim=dim, coords=coords, data_vars=data_vars)
def reindex_like(self, other, method=None, copy=True):
"""Conform this object onto the indexes of another object, filling
in missing values with NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to pandas.Index objects, which provides coordinates upon
which to index the variables in this dataset. The indexes on this
other object need not be the same as the indexes on this
dataset. Any mis-matched index values will be filled in with
NaN, and any mis-matched dimension names will simply be ignored.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values from other not found in this
dataset:
* default: don't fill gaps
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value (requires pandas>=0.16)
copy : bool, optional
If `copy=True`, the returned dataset contains only copied
variables. If `copy=False` and no reindexing is required then
original variables from this dataset are returned.
Returns
-------
reindexed : Dataset
Another dataset, with this dataset's data but coordinates from the
other object.
See Also
--------
Dataset.reindex
align
"""
return self.reindex(method=method, copy=copy, **other.indexes)
def reindex(self, indexers=None, method=None, copy=True, **kw_indexers):
"""Conform this object onto a new set of indexes, filling in
missing values with NaN.
Parameters
----------
indexers : dict, optional
Dictionary with keys given by dimension names and values given by
arrays of coordinate tick labels. Any mis-matched coordinate values
will be filled in with NaN, and any mis-matched dimension names will
simply be ignored.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values in ``indexers`` not found in
this dataset:
* default: don't fill gaps
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value (requires pandas>=0.16)
copy : bool, optional
If `copy=True`, the returned dataset contains only copied
variables. If `copy=False` and no reindexing is required then
original variables from this dataset are returned.
**kw_indexers : optional
Keyword arguments in the same form as ``indexers``.
Returns
-------
reindexed : Dataset
Another dataset, with this dataset's data but replaced coordinates.
See Also
--------
Dataset.reindex_like
align
pandas.Index.get_indexer
"""
indexers = utils.combine_pos_and_kw_args(indexers, kw_indexers,
'reindex')
if not indexers:
# shortcut
return self.copy(deep=True) if copy else self
variables = alignment.reindex_variables(
self.variables, self.indexes, indexers, method, copy=copy)
return self._replace_vars_and_dims(variables)
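# Reindexing sketch (illustrative; the labels shown are hypothetical):
#
#   >>> ds.reindex(x=[10, 20, 25])                  # unmatched labels filled with NaN
#   >>> ds.reindex(x=[10, 20, 25], method='ffill')  # pad from the last valid label
#   >>> ds.reindex_like(other)                      # conform to another object's indexes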
def rename(self, name_dict, inplace=False):
"""Returns a new object with renamed variables and dimensions.
Parameters
----------
name_dict : dict-like
Dictionary whose keys are current variable or dimension names and
whose values are new names.
inplace : bool, optional
If True, rename variables and dimensions in-place. Otherwise,
return a new dataset object.
Returns
-------
renamed : Dataset
Dataset with renamed variables and dimensions.
See Also
--------
Dataset.swap_dims
DataArray.rename
"""
for k in name_dict:
if k not in self:
raise ValueError("cannot rename %r because it is not a "
"variable in this dataset" % k)
variables = OrderedDict()
coord_names = set()
for k, v in iteritems(self._variables):
name = name_dict.get(k, k)
dims = tuple(name_dict.get(dim, dim) for dim in v.dims)
var = v.copy(deep=False)
var.dims = dims
variables[name] = var
if k in self._coord_names:
coord_names.add(name)
return self._replace_vars_and_dims(variables, coord_names,
inplace=inplace)
def swap_dims(self, dims_dict, inplace=False):
"""Returns a new object with swapped dimensions.
Parameters
----------
dims_dict : dict-like
Dictionary whose keys are current dimension names and whose values
are new names. Each value must already be a variable in the
dataset.
inplace : bool, optional
If True, swap dimensions in-place. Otherwise, return a new dataset
object.
Returns
-------
renamed : Dataset
Dataset with swapped dimensions.
See Also
--------
Dataset.rename
DataArray.swap_dims
"""
for k, v in dims_dict.items():
if k not in self.dims:
raise ValueError('cannot swap from dimension %r because it is '
'not an existing dimension' % k)
if self.variables[v].dims != (k,):
raise ValueError('replacement dimension %r is not a 1D '
'variable along the old dimension %r'
% (v, k))
result_dims = set(dims_dict.get(dim, dim) for dim in self.dims)
variables = OrderedDict()
coord_names = self._coord_names.copy()
coord_names.update(dims_dict.values())
for k, v in iteritems(self.variables):
dims = tuple(dims_dict.get(dim, dim) for dim in v.dims)
var = v.to_coord() if k in result_dims else v.to_variable()
var.dims = dims
variables[k] = var
return self._replace_vars_and_dims(variables, coord_names,
inplace=inplace)
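# Renaming sketch (illustrative; names are hypothetical):
#
#   >>> ds.rename({'temperature': 'temp', 'x': 'station'})
#   >>> ds.swap_dims({'x': 'station_id'})   # 'station_id' must be a 1D variable on 'x'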
def update(self, other, inplace=True):
"""Update this dataset's variables with those from another dataset.
Parameters
----------
other : Dataset or castable to Dataset
Dataset or variables with which to update this dataset.
inplace : bool, optional
If True, merge the other dataset into this dataset in-place.
Otherwise, return a new dataset object.
Returns
-------
updated : Dataset
Updated dataset.
Raises
------
ValueError
If any dimensions would have inconsistent sizes in the updated
dataset.
"""
return self.merge(
other, inplace=inplace, overwrite_vars=list(other), join='left')
def merge(self, other, inplace=False, overwrite_vars=set(),
compat='broadcast_equals', join='outer'):
"""Merge the arrays of two datasets into a single dataset.
This method generally does not allow for overriding data, with the exception
of attributes, which are ignored on the second dataset. Variables with
the same name are checked for conflicts via the equals or identical
methods.
Parameters
----------
other : Dataset or castable to Dataset
Dataset or variables to merge with this dataset.
inplace : bool, optional
If True, merge the other dataset into this dataset in-place.
Otherwise, return a new dataset object.
overwrite_vars : str or sequence, optional
If provided, update variables of these name(s) without checking for
conflicts in this dataset.
compat : {'broadcast_equals', 'equals', 'identical'}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
join : {'outer', 'inner', 'left', 'right'}, optional
Method for joining ``self`` and ``other`` along shared dimensions:
- 'outer': use the union of the indexes
- 'inner': use the intersection of the indexes
- 'left': use indexes from ``self``
- 'right': use indexes from ``other``
Returns
-------
merged : Dataset
Merged dataset.
Raises
------
ValueError
If any variables conflict (see ``compat``).
"""
if compat not in ['broadcast_equals', 'equals', 'identical']:
raise ValueError("compat=%r invalid: must be 'broadcast_equals', "
"'equals' or 'identical'" % compat)
if isinstance(overwrite_vars, basestring):
overwrite_vars = [overwrite_vars]
overwrite_vars = set(overwrite_vars)
merge = _merge_dataset if isinstance(other, Dataset) else _merge_dict
replace_vars, new_vars, new_coord_names = merge(
self, other, overwrite_vars, compat=compat, join=join)
newly_coords = new_coord_names & (set(self) - set(self.coords))
no_longer_coords = set(self.coords) & (set(new_vars) - new_coord_names)
ambiguous_coords = (newly_coords | no_longer_coords) - overwrite_vars
if ambiguous_coords:
raise ValueError('cannot merge: the following variables are '
'coordinates on one dataset but not the other: %s'
% list(ambiguous_coords))
obj = self if inplace else self.copy()
obj._update_vars_and_coords(replace_vars, new_coord_names)
return obj
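# Merge/update sketch (illustrative; 'ds' and 'other' are hypothetical datasets):
#
#   >>> ds.update(other)                     # in-place; variables from 'other' win
#   >>> merged = ds.merge(other)             # new object; conflicts raise ValueError
#   >>> ds.merge(other, join='inner', compat='identical')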
def _assert_all_in_dataset(self, names, virtual_okay=False):
bad_names = set(names) - set(self._variables)
if virtual_okay:
bad_names -= self.virtual_variables
if bad_names:
raise ValueError('One or more of the specified variables '
'cannot be found in this dataset')
def drop(self, labels, dim=None):
"""Drop variables or index labels from this dataset.
If a variable corresponding to a dimension is dropped, all variables
that use that dimension are also dropped.
Parameters
----------
labels : str
Names of variables or index labels to drop.
dim : None or str, optional
Dimension along which to drop index labels. By default (if
``dim is None``), drops variables rather than index labels.
Returns
-------
dropped : Dataset
"""
if utils.is_scalar(labels):
labels = [labels]
if dim is None:
return self._drop_vars(labels)
else:
new_index = self.indexes[dim].drop(labels)
return self.loc[{dim: new_index}]
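# Drop sketch (illustrative; names and labels are hypothetical):
#
#   >>> ds.drop('pressure')            # remove a variable by name
#   >>> ds.drop([10, 20], dim='x')     # remove index labels along dimension 'x'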
def _drop_vars(self, names):
self._assert_all_in_dataset(names)
drop = set(names)
drop |= set(k for k, v in iteritems(self._variables)
if any(name in v.dims for name in names))
variables = OrderedDict((k, v) for k, v in iteritems(self._variables)
if k not in drop)
coord_names = set(k for k in self._coord_names if k in variables)
return self._replace_vars_and_dims(variables, coord_names)
def drop_vars(self, *names): # pragma: no cover
warnings.warn('the Dataset method `drop_vars` has been deprecated; '
'use `drop` instead',
FutureWarning, stacklevel=2)
return self.drop(names)
def transpose(self, *dims):
"""Return a new Dataset object with all array dimensions transposed.
Although the order of dimensions on each array will change, the dataset
dimensions themselves will remain in fixed (sorted) order.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions on each array. Otherwise,
reorder the dimensions to this order.
Returns
-------
transposed : Dataset
Each array in the dataset (including coordinates) will be
transposed to the given order.
Notes
-----
Although this operation returns a view of each array's data, it
is not lazy -- the data will be fully loaded into memory.
See Also
--------
numpy.transpose
DataArray.transpose
"""
if dims:
if set(dims) ^ set(self.dims):
raise ValueError('arguments to transpose (%s) must be '
'permuted dataset dimensions (%s)'
% (dims, tuple(self.dims)))
ds = self.copy()
for name, var in iteritems(self._variables):
var_dims = tuple(dim for dim in dims if dim in var.dims)
ds._variables[name] = var.transpose(*var_dims)
return ds
@property
def T(self):
return self.transpose()
def squeeze(self, dim=None):
"""Returns a new dataset with squeezed data.
Parameters
----------
dim : None or str or tuple of str, optional
Selects a subset of the length one dimensions. If a dimension is
selected with length greater than one, an error is raised. If
None, all length one dimensions are squeezed.
Returns
-------
squeezed : Dataset
This dataset, but with all or a subset of the dimensions of
length 1 removed.
Notes
-----
Although this operation returns a view of each variable's data, it is
not lazy -- all variable data will be fully loaded.
See Also
--------
numpy.squeeze
"""
return common.squeeze(self, self.dims, dim)
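# Shape sketch (illustrative; dimension names are hypothetical):
#
#   >>> ds.transpose('y', 'x')   # arguments must name every dataset dimension
#   >>> ds.T                     # shorthand: reverse all dimensions
#   >>> ds.squeeze('station')    # drop 'station' if it has length one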
def dropna(self, dim, how='any', thresh=None, subset=None):
"""Returns a new dataset with dropped labels for missing values along
the provided dimension.
Parameters
----------
dim : str
Dimension along which to drop missing values. Dropping along
multiple dimensions simultaneously is not yet supported.
how : {'any', 'all'}, optional
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default None
If supplied, require this many non-NA values.
subset : sequence, optional
Subset of variables to check for missing values. By default, all
variables in the dataset are checked.
Returns
-------
Dataset
"""
# TODO: consider supporting multiple dimensions? Or not, given that
# there are some ugly edge cases, e.g., pandas's dropna differs
# depending on the order of the supplied axes.
if dim not in self.dims:
raise ValueError('%s must be a single dataset dimension' % dim)
if subset is None:
subset = list(self.data_vars)
count = np.zeros(self.dims[dim], dtype=np.int64)
size = 0
for k in subset:
array = self._variables[k]
if dim in array.dims:
dims = [d for d in array.dims if d != dim]
count += array.count(dims)
size += np.prod([self.dims[d] for d in dims])
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == size
elif how == 'all':
mask = count > 0
elif how is not None:
raise ValueError('invalid how option: %s' % how)
else:
raise TypeError('must specify how or thresh')
return self.isel(**{dim: mask})
def fillna(self, value):
"""Fill missing values in this object.
This operation follows the normal broadcasting and alignment rules that
xray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
Parameters
----------
value : scalar, ndarray, DataArray, dict or Dataset
Used to fill all matching missing values in this dataset's data
variables. Scalars, ndarrays or DataArray arguments are used to
fill all data with aligned coordinates (for DataArrays).
Dictionaries or datasets match data variables and then align
coordinates if necessary.
Returns
-------
Dataset
"""
return self._fillna(value)
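# Missing-data sketch (illustrative; 'ds' and 'time' are hypothetical):
#
#   >>> ds.dropna('time')                # drop labels with any missing value
#   >>> ds.dropna('time', how='all')     # drop only if every value is missing
#   >>> ds.dropna('time', thresh=3)      # keep labels with >= 3 non-NA values
#   >>> ds.fillna(0.0)                   # scalars, dicts or Datasets also work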
def reduce(self, func, dim=None, keep_attrs=False, numeric_only=False,
allow_lazy=False, **kwargs):
"""Reduce this dataset by applying `func` along some dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`f(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`. By default `func` is
applied over all dimensions.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
numeric_only : bool, optional
If True, only apply ``func`` to variables with a numeric dtype.
**kwargs : dict
Additional keyword arguments passed on to ``func``.
Returns
-------
reduced : Dataset
Dataset with this object's DataArrays replaced with new DataArrays
of summarized data and the indicated dimension(s) removed.
"""
if isinstance(dim, basestring):
dims = set([dim])
elif dim is None:
dims = set(self.dims)
else:
dims = set(dim)
_assert_empty([dim for dim in dims if dim not in self.dims],
'Dataset does not contain the dimensions: %s')
variables = OrderedDict()
for name, var in iteritems(self._variables):
reduce_dims = [dim for dim in var.dims if dim in dims]
if reduce_dims or not var.dims:
if name not in self.coords:
if (not numeric_only
or np.issubdtype(var.dtype, np.number)
or var.dtype == np.bool_):
if len(reduce_dims) == 1:
# unpack dimensions for the benefit of functions
# like np.argmin which can't handle tuple arguments
reduce_dims, = reduce_dims
elif len(reduce_dims) == var.ndim:
# prefer to aggregate over axis=None rather than
# axis=(0, 1) if they will be equivalent, because
# the former is often more efficient
reduce_dims = None
variables[name] = var.reduce(func, dim=reduce_dims,
keep_attrs=keep_attrs,
allow_lazy=allow_lazy,
**kwargs)
else:
variables[name] = var
coord_names = set(k for k in self.coords if k in variables)
attrs = self.attrs if keep_attrs else None
return self._replace_vars_and_dims(variables, coord_names, attrs)
def apply(self, func, keep_attrs=False, args=(), **kwargs):
"""Apply a function over the data variables in this dataset.
Parameters
----------
func : function
Function which can be called in the form `f(x, **kwargs)` to
transform each DataArray `x` in this dataset into another
DataArray.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False, the new object will
be returned without attributes.
args : tuple, optional
Positional arguments passed on to `func`.
**kwargs : dict
Keyword arguments passed on to `func`.
Returns
-------
applied : Dataset
Resulting dataset from applying ``func`` over each data variable.
"""
variables = OrderedDict(
(k, maybe_wrap_array(v, func(v, *args, **kwargs)))
for k, v in iteritems(self.data_vars))
attrs = self.attrs if keep_attrs else None
return type(self)(variables, attrs=attrs)
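# Reduce/apply sketch (illustrative; assumes numpy is imported as np):
#
#   >>> ds.reduce(np.mean, dim='time')      # collapse one dimension everywhere
#   >>> ds.apply(lambda x: x - x.mean())    # transform each data variable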
def assign(self, **kwargs):
"""Assign new data variables to a Dataset, returning a new object
with all the original variables in addition to the new ones.
Parameters
----------
kwargs : keyword, value pairs
keywords are the variables names. If the values are callable, they
are computed on the Dataset and assigned to new data variables. If
the values are not callable (e.g. a DataArray, scalar, or array),
they are simply assigned.
Returns
-------
ds : Dataset
A new Dataset with the new variables in addition to all the
existing variables.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your arguments may not
be preserved, and so the order of the new variables is not well
defined. Assigning multiple variables within the same ``assign`` is
possible, but you cannot reference other variables created within the
same ``assign`` call.
See Also
--------
pandas.DataFrame.assign
"""
data = self.copy()
# do all calculations first...
results = data._calc_assign_results(kwargs)
# ... and then assign
data.update(results)
return data
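# Assign sketch (illustrative; variable names are hypothetical):
#
#   >>> ds.assign(temp_f=lambda d: d['temp'] * 1.8 + 32)   # callables receive the Dataset
#   >>> ds.assign(constant=3.14)                            # plain values are assigned as-is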
def to_array(self, dim='variable', name=None):
"""Convert this dataset into an xray.DataArray
The data variables of this dataset will be broadcast against each other
and stacked along the first axis of the new array. All coordinates of
this dataset will remain coordinates.
Parameters
----------
dim : str, optional
Name of the new dimension.
name : str, optional
Name of the new data array.
Returns
-------
array : xray.DataArray
"""
from .dataarray import DataArray
data_vars = [self.variables[k] for k in self.data_vars]
broadcast_vars = broadcast_variables(*data_vars)
data = ops.stack([b.data for b in broadcast_vars], axis=0)
coords = dict(self.coords)
coords[dim] = list(self.data_vars)
dims = (dim,) + broadcast_vars[0].dims
return DataArray(data, coords, dims, attrs=self.attrs, name=name)
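# to_array sketch (illustrative):
#
#   >>> arr = ds.to_array(dim='variable')   # broadcast data variables and stack
#                                           # them along a new leading dimension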
def _to_dataframe(self, ordered_dims):
columns = [k for k in self if k not in self.dims]
data = [self._variables[k].expand_dims(ordered_dims).values.reshape(-1)
for k in columns]
index = self.coords.to_index(ordered_dims)
return pd.DataFrame(OrderedDict(zip(columns, data)), index=index)
def to_dataframe(self):
"""Convert this dataset into a pandas.DataFrame.
Non-index variables in this dataset form the columns of the
DataFrame. The DataFrame is indexed by the Cartesian product of
this dataset's indices.
"""
return self._to_dataframe(self.dims)
@classmethod
def from_dataframe(cls, dataframe):
"""Convert a pandas.DataFrame into an xray.Dataset
Each column will be converted into an independent variable in the
Dataset. If the dataframe's index is a MultiIndex, it will be expanded
into a tensor product of one-dimensional indices (filling in missing
values with NaN). This method will produce a Dataset very similar to
that on which the 'to_dataframe' method was called, except with
possibly redundant dimensions (since all dataset variables will have
the same dimensionality).
"""
# TODO: Add an option to remove dimensions along which the variables
# are constant, to enable consistent serialization to/from a dataframe,
# even if some variables have different dimensionality.
idx = dataframe.index
obj = cls()
if hasattr(idx, 'levels'):
# it's a multi-index
# expand the DataFrame to include the product of all levels
full_idx = pd.MultiIndex.from_product(idx.levels, names=idx.names)
dataframe = dataframe.reindex(full_idx)
dims = [name if name is not None else 'level_%i' % n
for n, name in enumerate(idx.names)]
for dim, lev in zip(dims, idx.levels):
obj[dim] = (dim, lev)
shape = [lev.size for lev in idx.levels]
else:
if idx.size:
dims = (idx.name if idx.name is not None else 'index',)
obj[dims[0]] = (dims, idx)
else:
dims = []
shape = -1
for name, series in iteritems(dataframe):
data = series.values.reshape(shape)
obj[name] = (dims, data)
return obj
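# pandas round-trip sketch (illustrative; 'ds' is a hypothetical Dataset):
#
#   >>> df = ds.to_dataframe()             # index = product of the dataset's indexes
#   >>> ds2 = Dataset.from_dataframe(df)   # columns become data variables again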
@staticmethod
def _unary_op(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
ds = self.coords.to_dataset()
for k in self.data_vars:
ds._variables[k] = f(self._variables[k], *args, **kwargs)
return ds
return func
@staticmethod
def _binary_op(f, reflexive=False, join='inner', drop_na_vars=True):
@functools.wraps(f)
def func(self, other):
if isinstance(other, groupby.GroupBy):
return NotImplemented
if hasattr(other, 'indexes'):
self, other = align(self, other, join=join, copy=False)
empty_indexes = [d for d, s in self.dims.items() if s == 0]
if empty_indexes:
raise ValueError('no overlapping labels for some '
'dimensions: %s' % empty_indexes)
g = f if not reflexive else lambda x, y: f(y, x)
ds = self._calculate_binary_op(g, other, drop_na_vars=drop_na_vars)
return ds
return func
@staticmethod
def _inplace_binary_op(f):
@functools.wraps(f)
def func(self, other):
if isinstance(other, groupby.GroupBy):
raise TypeError('in-place operations between a Dataset and '
'a grouped object are not permitted')
if hasattr(other, 'indexes'):
other = other.reindex_like(self, copy=False)
# we don't want to actually modify arrays in-place
g = ops.inplace_to_noninplace_op(f)
ds = self._calculate_binary_op(g, other, inplace=True)
self._replace_vars_and_dims(ds._variables, ds._coord_names,
ds._attrs, inplace=True)
return self
return func
def _calculate_binary_op(self, f, other, inplace=False, drop_na_vars=True):
def apply_over_both(lhs_data_vars, rhs_data_vars, lhs_vars, rhs_vars):
dest_vars = OrderedDict()
performed_op = False
for k in lhs_data_vars:
if k in rhs_data_vars:
dest_vars[k] = f(lhs_vars[k], rhs_vars[k])
performed_op = True
elif inplace:
raise ValueError(
'datasets must have the same data variables '
'for in-place arithmetic operations: %s, %s'
% (list(lhs_data_vars), list(rhs_data_vars)))
elif not drop_na_vars:
# this shortcuts left alignment of variables for fillna
dest_vars[k] = lhs_vars[k]
if not performed_op:
raise ValueError(
'datasets have no overlapping data variables: %s, %s'
% (list(lhs_data_vars), list(rhs_data_vars)))
return dest_vars
if utils.is_dict_like(other) and not isinstance(other, Dataset):
# can't use our shortcut of doing the binary operation with
# Variable objects, so apply over our data vars instead.
new_data_vars = apply_over_both(self.data_vars, other,
self.data_vars, other)
return Dataset(new_data_vars)
other_coords = getattr(other, 'coords', None)
ds = self.coords.merge(other_coords)
if isinstance(other, Dataset):
new_vars = apply_over_both(self.data_vars, other.data_vars,
self.variables, other.variables)
else:
other_variable = getattr(other, 'variable', other)
new_vars = OrderedDict((k, f(self.variables[k], other_variable))
for k in self.data_vars)
ds._variables.update(new_vars)
return ds
ops.inject_all_ops_and_reduce_methods(Dataset, array_only=False)
|
kjordahl/xray
|
xray/core/dataset.py
|
Python
|
apache-2.0
| 72,806
|
[
"NetCDF"
] |
c0c0f63e6cbdaedca173959e6ee74a6252b60c1b64855d1a310821fa1798c5a2
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse
from django.db import transaction
from django.template import Template, RequestContext
from django.http import HttpRequest
from publications.models import Publication, Type, CustomLink, List
from publications.templatetags.publication_extras import tex_parse
class Tests(TestCase):
fixtures = ['initial_data.json', 'test_data.json']
urls = 'publications.tests.urls'
def setUp(self):
User.objects.create_superuser('admin', 'admin@test.de', 'admin')
def test_authors(self):
publication = Publication.objects.create(
type=Type.objects.get(pk=1),
authors=u'Jörn-Philipp Lies and Ralf M. Häfner and M. Bethge',
title=u'Slowness and sparseness have diverging effects on complex cell learning',
year=2014,
journal=u'PLoS Computational Biology',
external=0)
publication.clean()
publication.save()
self.assertEqual(len(publication.authors_list), 3)
self.assertTrue('J.-P. Lies' in publication.authors_list)
self.assertTrue(('J.-P.', 'Lies') in publication.authors_list_split)
def test_citekey(self):
publication = Publication.objects.create(
type=Type.objects.get(pk=1),
authors=u'A. Unique and B. Common',
title=u'Title 1',
year=2014,
journal=u'Journal',
external=0)
publication.clean()
publication.save()
self.assertEqual(publication.citekey, 'Unique2014a')
publication = Publication.objects.create(
type=Type.objects.get(pk=1),
authors=u'A. Unique and C. Common',
title=u'Title 2',
year=2014,
journal=u'Journal',
external=0)
publication.clean()
publication.save()
self.assertEqual(publication.citekey, 'Unique2014b')
publication = Publication.objects.create(
type=Type.objects.get(pk=1),
authors=u'A. Unique and D. Uncommon',
title=u'Title 3',
year=2013,
journal=u'Journal',
external=0)
publication.clean()
publication.save()
self.assertEqual(publication.citekey, 'Unique2013a')
def test_custom_links(self):
link = CustomLink.objects.create(publication_id=1, description='Test', url='http://test.com')
link.save()
self.assertEqual(self.client.get('/publications/').status_code, 200)
self.assertEqual(self.client.get('/publications/1/').status_code, 200)
def test_publications(self):
publication = Publication.objects.create(
type=Type.objects.get(pk=1),
authors=u'Jörn-Philipp Lies and Ralf M. Häfner and M. Bethge',
title=u'Slowness and sparseness have diverging effects on complex cell learning',
year=2014,
journal=u'PLoS Computational Biology',
external=0)
publication.clean()
publication.save()
self.assertEqual(self.client.get('/publications/').status_code, 200)
self.assertEqual(self.client.get('/publications/?plain').status_code, 200)
self.assertEqual(self.client.get('/publications/?bibtex').status_code, 200)
self.assertEqual(self.client.get('/publications/?mods').status_code, 200)
self.assertEqual(self.client.get('/publications/?ris').status_code, 200)
self.assertEqual(self.client.get('/publications/?rss').status_code, 200)
self.assertEqual(self.client.get('/publications/1/').status_code, 200)
self.assertEqual(self.client.get('/publications/1/?plain').status_code, 200)
self.assertEqual(self.client.get('/publications/1/?bibtex').status_code, 200)
self.assertEqual(self.client.get('/publications/1/?mods').status_code, 200)
self.assertEqual(self.client.get('/publications/1/?ris').status_code, 200)
response = self.client.get('/publications/j.-p.+lies/')
self.assertEqual(response.status_code, 200)
self.assertGreater(len(response.context['publications']), 0)
self.assertEqual(self.client.get('/publications/j.-p.+lies/?plain').status_code, 200)
self.assertEqual(self.client.get('/publications/j.-p.+lies/?bibtex').status_code, 200)
self.assertEqual(self.client.get('/publications/j.-p.+lies/?mods').status_code, 200)
self.assertEqual(self.client.get('/publications/j.-p.+lies/?ris').status_code, 200)
self.assertEqual(self.client.get('/publications/j.-p.+lies/?rss').status_code, 200)
self.assertEqual(self.client.get('/publications/tag/noise+correlations/').status_code, 200)
self.assertEqual(self.client.get('/publications/list/highlights/').status_code, 200)
self.assertEqual(self.client.get('/publications/year/2011/').status_code, 200)
self.assertEqual(self.client.get('/publications/year/2011/?plain').status_code, 200)
self.assertEqual(self.client.get('/publications/year/2011/?bibtex').status_code, 200)
publication = Publication.objects.create(
type=Type.objects.get(pk=1),
authors=u'A. Unique and B. Common',
title=u'Title 3',
year=2012,
journal=u'Journal',
external=0)
publication.clean()
publication.save()
publication = Publication.objects.create(
type=Type.objects.get(pk=1),
authors=u'A. Unique and C. Common and D. Someone',
title=u'Title 4',
year=2011,
journal=u'Journal',
external=0)
publication.clean()
publication.save()
link = CustomLink.objects.create(
publication_id=publication.id, description='Test', url='http://test.com')
link.save()
response = self.client.get('/publications/c.+common/')
self.assertTrue('C. Common' in str(response.content))
self.assertFalse('B. Common' in str(response.content))
def test_bibtex_import(self):
self.client.login(username='admin', password='admin')
count = Publication.objects.count()
response = self.client.post('/admin/publications/publication/import_bibtex/',
{'bibliography': TEST_BIBLIOGRAPHY}, follow=False)
self.assertEqual(Publication.objects.count() - count, TEST_BIBLIOGRAPHY_COUNT)
publications = Publication.objects.filter(citekey='test:2009')
self.assertEqual(len(publications), 1)
self.assertTrue('F. Last-Name' in publications[0].authors_list)
self.assertTrue('P. van der Markt III' in publications[0].authors_list)
self.assertTrue('Test' in publications[0].authors_list)
self.assertTrue('C. F. Gauss II' in publications[0].authors_list)
publications = Publication.objects.filter(citekey='kay2015good')
self.assertEqual(len(publications), 1)
self.assertTrue(publications[0].title.startswith('How Good is 85%?'))
def test_unapi(self):
self.assertEqual(self.client.get('/publications/unapi/').status_code, 200)
self.assertEqual(self.client.get('/publications/unapi/?id=1').status_code, 200)
self.assertEqual(self.client.get('/publications/unapi/?id=1&format=mods').status_code, 200)
self.assertEqual(self.client.get('/publications/unapi/?id=1&format=bibtex').status_code, 200)
self.assertEqual(self.client.get('/publications/unapi/?id=1&format=ris').status_code, 200)
self.assertEqual(self.client.get('/publications/unapi/?id=99999&format=bibtex').status_code, 404)
self.assertEqual(self.client.get('/publications/unapi/?id=1&format=foobar').status_code, 406)
def test_admin(self):
self.client.login(username='admin', password='admin')
self.assertEqual(self.client.get('/publications/').status_code, 200)
self.assertEqual(self.client.get('/admin/publications/type/6/move-up/', follow=True).status_code, 200)
self.assertEqual(self.client.get('/admin/publications/type/6/move-down/', follow=True).status_code, 200)
def test_extras(self):
link = CustomLink.objects.create(publication_id=1, description='Test', url='http://test.com')
link.save()
publication = Publication.objects.get(pk=1)
lists = List.objects.filter(list__iexact='highlights')
self.assertEqual(len(lists), 1)
# add publication to list
publication.lists.add(lists[0])
# render list
tpl = Template("""
{% load publication_extras %}
{% get_publication 1 %}
{% get_publication_list 'highlights' 'publications/publications_with_thumbnails.html' %}
{% get_publication 10 %}
{% get_publications %}
""")
self.assertGreater(len(tpl.render(RequestContext(HttpRequest())).strip()), 0)
# tex_parse is used to replace simple LaTeX code in publication titles
self.assertEqual(tex_parse(u'$L_p$-spherical'), u'L<sub>p</sub>-spherical')
self.assertEqual(tex_parse(u'$L^2$-spherical'), u'L<sup>2</sup>-spherical')
TEST_BIBLIOGRAPHY_COUNT = 9
TEST_BIBLIOGRAPHY = r"""
@article{Bethge2002c,
author = "M. Bethge and D. Rotermund and K. Pawelzik",
title = "Optimal short-term population coding: when Fisher information fails",
year = 2002,
journal = "Neural Computation",
month = "Oct",
keywords = "population coding, fisher information",
doi = "10.1162/08997660260293247",
url = "http://www.mitpressjournals.org/doi/abs/10.1162/08997660260293247"
}
@article{Simovski2011,
author = {Simovski, Constantin R.},
journal = {J. Opt.},
month = jan,
number = {1},
pages = {013001},
title = {{On electromagnetic characterization and
homogenization of nanostructured metamaterials}},
volume = {13},
year = {2011},
doi = {10.1088/2040-8978/13/1/013001},
issn = {2040-8978},
url = {http://stacks.iop.org/2040-8986/13/i=1/
a=013001?key=crossref.7321766a6630b917c6f066f2abc1e2cc},
}
@inproceedings{gerwinn2008bayesian,
title={Bayesian inference for spiking neuron models with a sparsity prior},
author={Gerwinn, Sebastian and Macke, Jakob and Seeger, Matthias and Bethge, Matthias},
booktitle={Proceedings of the 21st Annual Conference on Neural Information Processing Systems},
number={EPFL-CONF-161311},
pages={529--536},
year={2008}
}
@article{hafner2000dynamical,
title={A dynamical model of the inner Galaxy},
author={H{\"a}fner, Ralf and Evans, N Wyn and Dehnen, Walter and Binney, James},
journal={Monthly Notices of the Royal Astronomical Society},
volume={314},
number={3},
pages={433--452},
year={2000},
publisher={Oxford University Press}
}
@misc{test:2009,
title = "Test",
author = {Last-Name, First and Peter van der Markt III and Test and Gauss II CF},
year = 2009
}
@article{DBLP:journals/corr/KummererWB14,
author = {Matthias K{\"{u}}mmerer and
Thomas Wallis and
Matthias Bethge},
title = {How close are we to understanding image-based saliency?},
journal = {CoRR},
year = {2014},
volume = {abs/1409.7686},
url = {http://arxiv.org/abs/1409.7686},
timestamp = {Mon, 27 Oct 2014 13:50:21 +0100},
biburl = {http://dblp.uni-trier.de/rec/bib/journals/corr/KummererWB14},
bibsource = {dblp computer science bibliography, http://dblp.org}
}
@incollection{dougan2014objective,
title={Objective Functions},
author={Do{\u{g}}an, Haluk and Otu, Hasan H},
booktitle={Multiple Sequence Alignment Methods},
pages={45--58},
year={2014},
publisher={Springer}
}
@inproceedings{DBLP:conf/patmos/ShahWSB14,
author = {Syed Abbas Ali Shah and
Jan Wagner and
Thomas Schuster and
Mladen Berekovic},
title = {A lightweight-system-level power and area estimation methodology for
application specific instruction set processors},
booktitle = {24th International Workshop on Power and Timing Modeling, Optimization
and Simulation, PATMOS), Palma de Mallorca, Spain, September 29 -
Oct. 1, 2014},
pages = {1--5},
publisher = {{IEEE}},
year = {2014},
url = {http://dx.doi.org/10.1109/PATMOS.2014.6951886},
doi = {10.1109/PATMOS.2014.6951886},
timestamp = {Tue, 18 Nov 2014 12:34:31 +0100},
biburl = {http://dblp.uni-trier.de/rec/bib/conf/patmos/ShahWSB14},
bibsource = {dblp computer science bibliography, http://dblp.org}
}
@inproceedings{kay2015good,
title={How Good is 85\%? A Survey Tool to Connect Classifier Evaluation to Acceptability of Accuracy},
author={Kay, Matthew and Patel, Shwetak N and Kientz, Julie A},
booktitle={Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems},
pages={347--356},
year={2015},
organization={ACM}
}
"""
|
lucastheis/django-publications
|
publications/tests/tests.py
|
Python
|
mit
| 12,057
|
[
"Galaxy",
"NEURON"
] |
fe9fe213b1c7604b475162e1ac91481bc6cc9311980a16f87d0c92779facddc6
|
#!/usr/bin/env python
#**************************************************************************
# Tintwizard
#
# Copyright (C) 2009 Euan Freeman <euan04@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version 3
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#*************************************************************************/
# Last modified: 14th June 2010
import pygtk
pygtk.require('2.0')
import gtk
import os
import sys
import signal
import webbrowser
import math
import shutil
# Project information
NAME = "tintwizard"
AUTHORS = ["Euan Freeman <euan04@gmail.com>"]
VERSION = "0.3.4"
COMMENTS = "tintwizard generates config files for the lightweight panel replacement tint2"
WEBSITE = "http://code.google.com/p/tintwizard/"
# Default values for text entry fields
BG_ROUNDING = "0"
BG_BORDER = "0"
PANEL_SIZE_X = "0"
PANEL_SIZE_Y = "40"
PANEL_MARGIN_X = "0"
PANEL_MARGIN_Y = "0"
PANEL_PADDING_X = "0"
PANEL_PADDING_Y = "0"
PANEL_MONITOR = "all"
PANEL_AUTOHIDE_SHOW = "0.0"
PANEL_AUTOHIDE_HIDE = "0.0"
PANEL_AUTOHIDE_HEIGHT = "0"
TASKBAR_PADDING_X = "0"
TASKBAR_PADDING_Y = "0"
TASKBAR_SPACING = "0"
TASK_BLINKS = "7"
TASK_MAXIMUM_SIZE_X = "200"
TASK_MAXIMUM_SIZE_Y = "32"
TASK_PADDING_X = "0"
TASK_PADDING_Y = "0"
TASK_SPACING = "0"
TRAY_PADDING_X = "0"
TRAY_PADDING_Y = "0"
TRAY_SPACING = "0"
TRAY_MAX_ICON_SIZE = "0"
TRAY_ICON_ALPHA = "100"
TRAY_ICON_SAT = "0"
TRAY_ICON_BRI = "0"
ICON_ALPHA = "100"
ICON_SAT = "0"
ICON_BRI = "0"
ACTIVE_ICON_ALPHA = "100"
ACTIVE_ICON_SAT = "0"
ACTIVE_ICON_BRI = "0"
URGENT_ICON_ALPHA = "100"
URGENT_ICON_SAT = "0"
URGENT_ICON_BRI = "0"
ICONIFIED_ICON_ALPHA = "100"
ICONIFIED_ICON_SAT = "0"
ICONIFIED_ICON_BRI = "0"
CLOCK_FMT_1 = "%H:%M"
CLOCK_FMT_2 = "%a %d %b"
CLOCK_TOOLTIP = ""
CLOCK_TIME1_TIMEZONE = ""
CLOCK_TIME2_TIMEZONE = ""
CLOCK_TOOLTIP_TIMEZONE = ""
CLOCK_PADDING_X = "0"
CLOCK_PADDING_Y = "0"
CLOCK_LCLICK = ""
CLOCK_RCLICK = ""
TOOLTIP_PADDING_X = "0"
TOOLTIP_PADDING_Y = "0"
TOOLTIP_SHOW_TIMEOUT = "0"
TOOLTIP_HIDE_TIMEOUT = "0"
BATTERY_LOW = "20"
BATTERY_HIDE = "90"
BATTERY_ACTION = 'notify-send "battery low"'
BATTERY_PADDING_X = "0"
BATTERY_PADDING_Y = "0"
class TintWizardPrefGUI(gtk.Window):
"""The dialog window which lets the user change the default preferences."""
def __init__(self, tw):
"""Create and shows the window."""
self.tw = tw
# Create top-level window
gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
self.set_title("Preferences")
self.connect("delete_event", self.quit)
self.layout = gtk.Table(2, 2, False)
self.table = gtk.Table(5, 2, False)
self.table.set_row_spacings(5)
self.table.set_col_spacings(5)
createLabel(self.table, text="Default Font", gridX=0, gridY=0)
self.font = gtk.FontButton(self.tw.defaults["font"])
self.font.set_alignment(0, 0.5)
self.table.attach(self.font, 1, 2, 0, 1, xoptions=gtk.EXPAND, yoptions=gtk.EXPAND)
createLabel(self.table, text="Default Background Color", gridX=0, gridY=1)
self.bgColor = gtk.ColorButton(gtk.gdk.color_parse(self.tw.defaults["bgColor"]))
self.bgColor.set_alignment(0, 0.5)
self.table.attach(self.bgColor, 1, 2, 1, 2, xoptions=gtk.EXPAND, yoptions=gtk.EXPAND)
createLabel(self.table, text="Default Foreground Color", gridX=0, gridY=2)
self.fgColor = gtk.ColorButton(gtk.gdk.color_parse(self.tw.defaults["fgColor"]))
self.fgColor.set_alignment(0, 0.5)
self.table.attach(self.fgColor, 1, 2, 2, 3, xoptions=gtk.EXPAND, yoptions=gtk.EXPAND)
createLabel(self.table, text="Default Border Color", gridX=0, gridY=3)
self.borderColor = gtk.ColorButton(gtk.gdk.color_parse(self.tw.defaults["borderColor"]))
self.borderColor.set_alignment(0, 0.5)
self.table.attach(self.borderColor, 1, 2, 3, 4, xoptions=gtk.EXPAND, yoptions=gtk.EXPAND)
createLabel(self.table, text="Number of background styles", gridX=0, gridY=4)
self.bgCount = createEntry(self.table, maxSize=6, width=8, text=str(self.tw.defaults["bgCount"]), gridX=1, gridY=4, xExpand=True, yExpand=True)
self.layout.attach(self.table, 0, 2, 0, 1, xoptions=gtk.EXPAND, yoptions=gtk.EXPAND, xpadding=20, ypadding=5)
createButton(self.layout, text="Save", stock=gtk.STOCK_SAVE, name="save", gridX=0, gridY=1, xExpand=True, yExpand=True, handler=self.save)
createButton(self.layout, text="Cancel", stock=gtk.STOCK_CANCEL, name="cancel", gridX=1, gridY=1, xExpand=True, yExpand=True, handler=self.quit)
self.add(self.layout)
self.show_all()
def quit(self, widget=None, event=None):
"""Destroys the window."""
self.destroy()
def save(self, action=None):
"""Called when the Save button is clicked."""
if confirmDialog(self, "Overwrite configuration file?") == gtk.RESPONSE_YES:
self.tw.defaults["font"] = self.font.get_font_name()
self.tw.defaults["bgColor"] = rgbToHex(self.bgColor.get_color().red, self.bgColor.get_color().green, self.bgColor.get_color().blue)
self.tw.defaults["fgColor"] = rgbToHex(self.fgColor.get_color().red, self.fgColor.get_color().green, self.fgColor.get_color().blue)
self.tw.defaults["borderColor"] = rgbToHex(self.borderColor.get_color().red, self.borderColor.get_color().green, self.borderColor.get_color().blue)
try:
self.tw.defaults["bgCount"] = int(self.bgCount.get_text())
except ValueError:
errorDialog(self, "Invalid value for background count")
return
self.tw.writeConf()
self.quit()
class TintWizardGUI(gtk.Window):
"""The main window for the application."""
def __init__(self):
"""Create and show the window."""
self.filename = None
self.curDir = None
self.toSave = False
if len(sys.argv) > 1:
self.filename = sys.argv[1]
self.oneConfigFile = True
else:
self.oneConfigFile = False
# Read conf file and set default values
self.readConf()
if self.defaults["bgColor"] in [None, "None"]:
self.defaults["bgColor"] = "#000000"
if self.defaults["fgColor"] in [None, "None"]:
self.defaults["fgColor"] = "#ffffff"
if self.defaults["borderColor"] in [None, "None"]:
self.defaults["borderColor"] = "#ffffff"
if os.path.exists(os.path.expandvars("${HOME}") + "/.config/tint2"):
self.curDir = os.path.expandvars("${HOME}") + "/.config/tint2"
else:
errorDialog("$HOME/.config/tint2/ directory not found! Is tint2 installed correctly?")
sys.exit(1)
try:
self.defaults["bgCount"] = int(self.defaults["bgCount"])
except (KeyError, TypeError, ValueError):
self.defaults["bgCount"] = 2
# Get the full location of the tint2 binary
which = os.popen('which tint2')
self.tint2Bin = which.readline().strip()
which.close()
if len(self.tint2Bin) == 0:
errorDialog(self, "tint2 could not be found. Are you sure it is installed?")
sys.exit(1)
# Create top-level window
gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
self.set_title("tintwizard")
self.connect("delete_event", self.quit)
# self.table is our main layout manager
self.table = gtk.Table(4, 1, False)
# Set up the dictionary to hold all registered widgets
self.propUI = {}
# Create menus and toolbar items
ui = """
<ui>
<menubar name="MenuBar">
<menu action="File">
<menuitem action="New" />
<menuitem action="Open" />
<separator />
<menuitem action="Save" />
<menuitem action="Save As..." />
<separator />
<menuitem action="Quit" />
</menu>
<menu action="Tint2">
<menuitem action="OpenDefault" />
<menuitem action="SaveDefault" />
<separator />
<menuitem action="Apply" />
</menu>
<menu action="Tools">
<menuitem action="FontChange" />
<separator />
<menuitem action="Defaults" />
</menu>
<menu action="HelpMenu">
<menuitem action="Help" />
<menuitem action="Report Bug" />
<separator />
<menuitem action="About" />
</menu>
</menubar>
<toolbar name="ToolBar">
<toolitem action="New" />
<toolitem action="Open" />
<toolitem action="Save" />
<separator />
<toolitem action="Apply" />
</toolbar>
</ui>
"""
# Set up UI manager
self.uiManager = gtk.UIManager()
accelGroup = self.uiManager.get_accel_group()
self.add_accel_group(accelGroup)
self.ag = gtk.ActionGroup("File")
self.ag.add_actions([("File", None, "_File"),
("New",gtk.STOCK_NEW, "_New", None, "Create a new config", self.new),
("Open", gtk.STOCK_OPEN, "_Open", None, "Open an existing config", self.openFile),
("Save", gtk.STOCK_SAVE, "_Save", None, "Save the current config", self.save),
("Save As...", gtk.STOCK_SAVE_AS, "Save As", None, "Save the current config as...", self.saveAs),
("SaveDefault", None, "Save As tint2 Default", None, "Save the current config as the tint2 default", self.saveAsDef),
("OpenDefault", None, "Open tint2 Default", None, "Open the current tint2 default config", self.openDef),
("Apply", gtk.STOCK_APPLY, "Apply Config", None, "Apply the current config to tint2", self.apply),
("Quit", gtk.STOCK_QUIT, "_Quit", None, "Quit the program", self.quit),
("Tools", None, "_Tools"),
("Tint2", None, "Tint_2"),
("HelpMenu", None, "_Help"),
("FontChange",gtk.STOCK_SELECT_FONT, "Change All Fonts", None, "Change all fonts at once.", self.changeAllFonts),
("Defaults",gtk.STOCK_PREFERENCES, "Change Defaults", None, "Change tintwizard defaults.", self.changeDefaults),
("Help",gtk.STOCK_HELP, "_Help", None, "Get help with tintwizard", self.help),
("Report Bug",None, "Report Bug", None, "Report a problem with tintwizard", self.reportBug),
("About",gtk.STOCK_ABOUT, "_About Tint Wizard", None, "Find out more about Tint Wizard", self.about)])
# Add main UI
self.uiManager.insert_action_group(self.ag, -1)
self.uiManager.add_ui_from_string(ui)
if not self.oneConfigFile:
# Attach menubar and toolbar to main window
self.table.attach(self.uiManager.get_widget("/MenuBar"), 0, 4, 0, 1)
self.table.attach(self.uiManager.get_widget("/ToolBar"), 0, 4, 1, 2)
# Create notebook
self.notebook = gtk.Notebook()
self.notebook.set_tab_pos(gtk.POS_TOP)
# Create notebook pages
# Background Options
self.tableBgs = gtk.Table(rows=1, columns=1, homogeneous=False)
self.tableBgs.set_row_spacings(5)
self.tableBgs.set_col_spacings(5)
self.bgNotebook = gtk.Notebook()
self.bgNotebook.set_scrollable(True)
self.tableBgs.attach(self.bgNotebook, 0, 2, 0, 1)
self.bgs = []
# Add buttons for adding/deleting background styles
createButton(self.tableBgs, text="New Background", stock=gtk.STOCK_NEW, name="addBg", gridX=0, gridY=1, xExpand=True, yExpand=True, handler=self.addBgClick)
createButton(self.tableBgs, text="Delete Background", stock=gtk.STOCK_DELETE, name="delBg", gridX=1, gridY=1, xExpand=True, yExpand=True, handler=self.delBgClick)
# Panel
self.createPanelDisplayWidgets()
self.createPanelSettingsWidgets()
self.createPanelAutohideWidgets()
# Taskbar
self.createTaskbarWidgets()
# Tasks
self.createTaskSettingsWidgets()
self.createNormalTasksWidgets()
self.createActiveTasksWidgets()
self.createUrgentTasksWidgets()
self.createIconifiedTasksWidgets()
# System Tray
self.createSystemTrayWidgets()
# Clock
self.createClockDisplayWidgets()
self.createClockSettingsWidgets()
# Mouse
self.createMouseWidgets()
# Tooltips
self.createTooltipsWidgets()
# Battery
self.createBatteryWidgets()
# View Config
self.configArea = gtk.ScrolledWindow()
self.configBuf = gtk.TextBuffer()
self.configTextView = gtk.TextView(self.configBuf)
self.configArea.add_with_viewport(self.configTextView)
# Add backgrounds to notebooks
for i in range(self.defaults["bgCount"]):
self.addBgClick(None, init=True)
self.bgNotebook.set_current_page(0)
# Create sub-notebooks
self.panelNotebook = gtk.Notebook()
self.panelNotebook.set_tab_pos(gtk.POS_TOP)
self.panelNotebook.set_current_page(0)
self.panelNotebook.append_page(self.tablePanelDisplay, gtk.Label("Panel Display"))
self.panelNotebook.append_page(self.tablePanelSettings, gtk.Label("Panel Settings"))
self.panelNotebook.append_page(self.tablePanelAutohide, gtk.Label("Panel Autohide"))
self.taskNotebook = gtk.Notebook()
self.taskNotebook.set_tab_pos(gtk.POS_TOP)
self.taskNotebook.set_current_page(0)
self.taskNotebook.append_page(self.tableTask, gtk.Label("Task Settings"))
self.taskNotebook.append_page(self.tableTaskDefault, gtk.Label("Normal Tasks"))
self.taskNotebook.append_page(self.tableTaskActive, gtk.Label("Active Tasks"))
self.taskNotebook.append_page(self.tableTaskUrgent, gtk.Label("Urgent Tasks"))
self.taskNotebook.append_page(self.tableTaskIconified, gtk.Label("Iconified Tasks"))
self.clockNotebook = gtk.Notebook()
self.clockNotebook.set_tab_pos(gtk.POS_TOP)
self.clockNotebook.set_current_page(0)
self.clockNotebook.append_page(self.tableClockDisplays, gtk.Label("Clock Display"))
self.clockNotebook.append_page(self.tableClockSettings, gtk.Label("Clock Settings"))
# Add pages to notebook
self.notebook.append_page(self.tableBgs, gtk.Label("Backgrounds"))
self.notebook.append_page(self.panelNotebook, gtk.Label("Panel"))
self.notebook.append_page(self.tableTaskbar, gtk.Label("Taskbar"))
self.notebook.append_page(self.taskNotebook, gtk.Label("Tasks"))
self.notebook.append_page(self.tableTray, gtk.Label("System Tray"))
self.notebook.append_page(self.clockNotebook, gtk.Label("Clock"))
self.notebook.append_page(self.tableMouse, gtk.Label("Mouse"))
self.notebook.append_page(self.tableTooltip, gtk.Label("Tooltips"))
self.notebook.append_page(self.tableBattery, gtk.Label("Battery"))
self.notebook.append_page(self.configArea, gtk.Label("View Config"))
self.notebook.connect("switch-page", self.switchPage)
# Add notebook to window and show
self.table.attach(self.notebook, 0, 4, 2, 3, xpadding=5, ypadding=5)
if self.oneConfigFile:
# Add button Apply and Close
self.box1 = gtk.HBox(False, 20)
self.table.attach(self.box1, 0, 4, 3, 4, xpadding=5, ypadding=5)
temp = gtk.Button("Apply", gtk.STOCK_APPLY)
temp.set_name("applyBg")
temp.connect("clicked", self.apply)
self.box1.pack_start(temp, True, True, 0)
temp = gtk.Button("Close", gtk.STOCK_CLOSE)
temp.set_name("closeBg")
temp.connect("clicked", self.quit)
self.box1.pack_start(temp, True, True, 0)
# Create and add the status bar to the bottom of the main window
self.statusBar = gtk.Statusbar()
self.statusBar.set_has_resize_grip(True)
self.updateStatusBar("New Config File [*]")
self.table.attach(self.statusBar, 0, 4, 4, 5)
self.add(self.table)
self.show_all()
# If tintwizard was launched with a tint2 config filename
# as an argument, load that config.
if self.oneConfigFile:
self.readTint2Config()
self.generateConfig()
def createPanelDisplayWidgets(self):
"""Create the Panel Display widgets."""
self.tablePanelDisplay = gtk.Table(rows=6, columns=3, homogeneous=False)
self.tablePanelDisplay.set_row_spacings(5)
self.tablePanelDisplay.set_col_spacings(5)
createLabel(self.tablePanelDisplay, text="Position", gridX=0, gridY=0, xPadding=10)
self.panelPosY = createComboBox(self.tablePanelDisplay, ["bottom", "top", "center"], gridX=1, gridY=0, handler=self.changeOccurred)
self.panelPosX = createComboBox(self.tablePanelDisplay, ["left", "right", "center"], gridX=2, gridY=0, handler=self.changeOccurred)
# Note: registered below
createLabel(self.tablePanelDisplay, text="Panel Orientation", gridX=0, gridY=1, xPadding=10)
self.panelOrientation = createComboBox(self.tablePanelDisplay, ["horizontal", "vertical"], gridX=1, gridY=1, handler=self.changeOccurred)
self.registerComponent("panel_position", (self.panelPosY, self.panelPosX, self.panelOrientation))
self.panelSizeLabel = createLabel(self.tablePanelDisplay, text="Size (width, height)", gridX=0, gridY=2, xPadding=10)
self.panelSizeX = createEntry(self.tablePanelDisplay, maxSize=6, width=8, text=PANEL_SIZE_X, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.panelSizeY = createEntry(self.tablePanelDisplay, maxSize=6, width=8, text=PANEL_SIZE_Y, gridX=2, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("panel_size", (self.panelSizeX, self.panelSizeY))
createLabel(self.tablePanelDisplay, text="Margin (x, y)", gridX=0, gridY=3, xPadding=10)
self.panelMarginX = createEntry(self.tablePanelDisplay, maxSize=6, width=8, text=PANEL_MARGIN_X, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.panelMarginY = createEntry(self.tablePanelDisplay, maxSize=6, width=8, text=PANEL_MARGIN_Y, gridX=2, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("panel_margin", (self.panelMarginX, self.panelMarginY))
createLabel(self.tablePanelDisplay, text="Padding (x, y)", gridX=0, gridY=4, xPadding=10)
self.panelPadX = createEntry(self.tablePanelDisplay, maxSize=6, width=8, text=PANEL_PADDING_X, gridX=1, gridY=4, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.panelPadY = createEntry(self.tablePanelDisplay, maxSize=6, width=8, text=PANEL_PADDING_Y, gridX=2, gridY=4, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tablePanelDisplay, text="Horizontal Spacing", gridX=0, gridY=5, xPadding=10)
self.panelSpacing = createEntry(self.tablePanelDisplay, maxSize=6, width=8, text=TASKBAR_SPACING, gridX=1, gridY=5, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("panel_padding", (self.panelPadX, self.panelPadY, self.panelSpacing))
createLabel(self.tablePanelDisplay, text="Panel Background ID", gridX=0, gridY=6, xPadding=10)
self.panelBg = createComboBox(self.tablePanelDisplay, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=6, handler=self.changeOccurred)
self.registerComponent("panel_background_id", self.panelBg)
def createPanelSettingsWidgets(self):
"""Create the Panel Settings widgets."""
self.tablePanelSettings = gtk.Table(rows=5, columns=3, homogeneous=False)
self.tablePanelSettings.set_row_spacings(5)
self.tablePanelSettings.set_col_spacings(5)
createLabel(self.tablePanelSettings, text="Window Manager Menu", gridX=0, gridY=0, xPadding=10)
self.panelMenu = createCheckButton(self.tablePanelSettings, active=False, gridX=1, gridY=0, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("wm_menu", self.panelMenu)
createLabel(self.tablePanelSettings, text="Place In Window Manager Dock", gridX=0, gridY=1, xPadding=10)
self.panelDock = createCheckButton(self.tablePanelSettings, active=False, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("panel_dock", self.panelDock)
createLabel(self.tablePanelSettings, text="Panel Layer", gridX=0, gridY=2, xPadding=10)
self.panelLayer = createComboBox(self.tablePanelSettings, ["bottom", "top", "normal"], gridX=1, gridY=2, handler=self.changeOccurred)
self.registerComponent("panel_layer", self.panelLayer)
createLabel(self.tablePanelSettings, text="Strut Policy", gridX=0, gridY=3, xPadding=10)
self.panelAutohideStrut = createComboBox(self.tablePanelSettings, ["none", "minimum", "follow_size"], gridX=1, gridY=3, handler=self.changeOccurred)
self.registerComponent("strut_policy", self.panelAutohideStrut)
createLabel(self.tablePanelSettings, text="Panel Monitor (all, 1, 2, ...)", gridX=0, gridY=4, xPadding=10)
self.panelMonitor = createEntry(self.tablePanelSettings, maxSize=6, width=8, text=PANEL_MONITOR, gridX=1, gridY=4, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("panel_monitor", self.panelMonitor)
def createPanelAutohideWidgets(self):
"""Create the Panel Autohide widgets."""
self.tablePanelAutohide = gtk.Table(rows=4, columns=3, homogeneous=False)
self.tablePanelAutohide.set_row_spacings(5)
self.tablePanelAutohide.set_col_spacings(5)
createLabel(self.tablePanelAutohide, text="Autohide Panel", gridX=0, gridY=0, xPadding=10)
self.panelAutohide = createCheckButton(self.tablePanelAutohide, active=False, gridX=1, gridY=0, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("autohide", self.panelAutohide)
createLabel(self.tablePanelAutohide, text="Autohide Show Timeout (seconds)", gridX=0, gridY=1, xPadding=10)
self.panelAutohideShow = createEntry(self.tablePanelAutohide, maxSize=6, width=8, text=PANEL_AUTOHIDE_SHOW, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("autohide_show_timeout", self.panelAutohideShow)
createLabel(self.tablePanelAutohide, text="Autohide Hide Timeout (seconds)", gridX=0, gridY=2, xPadding=10)
self.panelAutohideHide = createEntry(self.tablePanelAutohide, maxSize=6, width=8, text=PANEL_AUTOHIDE_HIDE, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("autohide_hide_timeout", self.panelAutohideHide)
createLabel(self.tablePanelAutohide, text="Autohide Hidden Height", gridX=0, gridY=3, xPadding=10)
self.panelAutohideHeight = createEntry(self.tablePanelAutohide, maxSize=6, width=8, text=PANEL_AUTOHIDE_HEIGHT, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("autohide_height", self.panelAutohideHeight)
def createTaskbarWidgets(self):
"""Create the Taskbar widgets."""
self.tableTaskbar = gtk.Table(rows=5, columns=3, homogeneous=False)
self.tableTaskbar.set_row_spacings(5)
self.tableTaskbar.set_col_spacings(5)
createLabel(self.tableTaskbar, text="Taskbar Mode", gridX=0, gridY=0, xPadding=10)
self.taskbarMode = createComboBox(self.tableTaskbar, ["single_desktop", "multi_desktop"], gridX=1, gridY=0, handler=self.changeOccurred)
self.registerComponent("taskbar_mode", self.taskbarMode)
createLabel(self.tableTaskbar, text="Padding (x, y)", gridX=0, gridY=1, xPadding=10)
self.taskbarPadX = createEntry(self.tableTaskbar, maxSize=6, width=8, text=TASKBAR_PADDING_X, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.taskbarPadY = createEntry(self.tableTaskbar, maxSize=6, width=8, text=TASKBAR_PADDING_Y, gridX=2, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskbar, text="Horizontal Spacing", gridX=0, gridY=2, xPadding=10)
self.taskbarSpacing = createEntry(self.tableTaskbar, maxSize=6, width=8, text=TASK_SPACING, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("taskbar_padding", (self.taskbarPadX, self.taskbarPadY, self.taskbarSpacing))
createLabel(self.tableTaskbar, text="Taskbar Background ID", gridX=0, gridY=3, xPadding=10)
self.taskbarBg = createComboBox(self.tableTaskbar, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=3, handler=self.changeOccurred)
self.registerComponent("taskbar_background_id", self.taskbarBg)
createLabel(self.tableTaskbar, text="Active Taskbar Background ID", gridX=0, gridY=4, xPadding=10)
self.taskbarActiveBg = createComboBox(self.tableTaskbar, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=4, handler=self.changeOccurred)
self.taskbarActiveBgEnable = createCheckButton(self.tableTaskbar, text="Enable", active=False, gridX=2, gridY=4, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("taskbar_active_background_id", self.taskbarActiveBg)
def createTaskSettingsWidgets(self):
"""Create the Task Settings widgets."""
self.tableTask = gtk.Table(rows=12, columns=3, homogeneous=False)
self.tableTask.set_row_spacings(5)
self.tableTask.set_col_spacings(5)
createLabel(self.tableTask, text="Number of 'Blinks' on Urgent Event", gridX=0, gridY=0, xPadding=10)
self.taskBlinks = createEntry(self.tableTask, maxSize=6, width=8, text=TASK_BLINKS, gridX=1, gridY=0, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("urgent_nb_of_blink", self.taskBlinks)
createLabel(self.tableTask, text="Show Icons", gridX=0, gridY=1, xPadding=10)
self.taskIconCheckButton = createCheckButton(self.tableTask, active=True, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_icon", self.taskIconCheckButton)
createLabel(self.tableTask, text="Show Text", gridX=0, gridY=2, xPadding=10)
self.taskTextCheckButton = createCheckButton(self.tableTask, active=True, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_text", self.taskTextCheckButton)
createLabel(self.tableTask, text="Centre Text", gridX=0, gridY=3, xPadding=10)
self.taskCentreCheckButton = createCheckButton(self.tableTask, active=True, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_centered", self.taskCentreCheckButton)
createLabel(self.tableTask, text="Font", gridX=0, gridY=4, xPadding=10)
self.fontButton = gtk.FontButton()
if self.defaults["font"] in [None, "None"]: # If there was no font specified in the config file
self.defaults["font"] = self.fontButton.get_font_name() # Use the gtk default
self.fontButton = createFontButton(self.tableTask, font=self.defaults["font"], gridX=1, gridY=4, handler=self.changeOccurred)
self.registerComponent("task_font", self.fontButton)
createLabel(self.tableTask, text="Show Font Shadow", gridX=0, gridY=5, xPadding=10)
self.fontShadowCheckButton = createCheckButton(self.tableTask, active=False, gridX=1, gridY=5, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("font_shadow", self.fontShadowCheckButton)
createLabel(self.tableTask, text="Maximum Size (x, y)", gridX=0, gridY=6, xPadding=10)
self.taskMaxSizeX = createEntry(self.tableTask, maxSize=6, width=8, text=TASK_MAXIMUM_SIZE_X, gridX=1, gridY=6, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.taskMaxSizeY = createEntry(self.tableTask, maxSize=6, width=8, text=TASK_MAXIMUM_SIZE_Y, gridX=2, gridY=6, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_maximum_size", (self.taskMaxSizeX, self.taskMaxSizeY))
createLabel(self.tableTask, text="Padding (x, y)", gridX=0, gridY=7, xPadding=10)
self.taskPadX = createEntry(self.tableTask, maxSize=6, width=8, text=TASK_PADDING_X, gridX=1, gridY=7, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.taskPadY = createEntry(self.tableTask, maxSize=6, width=8, text=TASK_PADDING_Y, gridX=2, gridY=7, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_padding", (self.taskPadX, self.taskPadY))
def createNormalTasksWidgets(self):
"""Create the Normal Tasks widgets."""
self.tableTaskDefault = gtk.Table(rows=6, columns=3, homogeneous=False)
self.tableTaskDefault.set_row_spacings(5)
self.tableTaskDefault.set_col_spacings(5)
createLabel(self.tableTaskDefault, text="Normal Task Background ID", gridX=0, gridY=0, xPadding=10)
self.taskBg = createComboBox(self.tableTaskDefault, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=0, handler=self.changeOccurred)
self.registerComponent("task_background_id", self.taskBg)
createLabel(self.tableTaskDefault, text="Note: Default values of 0 for each of these settings leaves icons unchanged!", gridX=0, gridY=1, sizeX=3, xPadding=10)
createLabel(self.tableTaskDefault, text="Normal Icon Alpha (0 to 100)", gridX=0, gridY=2, xPadding=10)
self.iconHue = createEntry(self.tableTaskDefault, maxSize=6, width=8, text=ICON_ALPHA, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskDefault, text="Normal Icon Saturation (-100 to 100)", gridX=0, gridY=3, xPadding=10)
self.iconSat = createEntry(self.tableTaskDefault, maxSize=6, width=8, text=ICON_SAT, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskDefault, text="Normal Icon Brightness (-100 to 100)", gridX=0, gridY=4, xPadding=10)
self.iconBri = createEntry(self.tableTaskDefault, maxSize=6, width=8, text=ICON_BRI, gridX=1, gridY=4, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_icon_asb", (self.iconHue, self.iconSat, self.iconBri))
createLabel(self.tableTaskDefault, text="Normal Font Color", gridX=0, gridY=5, xPadding=10)
self.fontCol = createEntry(self.tableTaskDefault, maxSize=7, width=9, text="", gridX=1, gridY=5, xExpand=True, yExpand=False, handler=None, name="fontCol")
self.fontCol.connect("activate", self.colorTyped)
self.fontColButton = createColorButton(self.tableTaskDefault, color=self.defaults["fgColor"], useAlpha=True, name="fontCol", gridX=2, gridY=5, handler=self.colorChange)
self.fontCol.set_text(self.defaults["fgColor"])
# Add this AFTER we set color to avoid "changed" event
self.fontCol.connect("changed", self.changeOccurred)
self.registerComponent("task_font_color", (self.fontCol, self.fontColButton))
def createActiveTasksWidgets(self):
"""Create the Active Tasks widgets."""
self.tableTaskActive = gtk.Table(rows=6, columns=3, homogeneous=False)
self.tableTaskActive.set_row_spacings(5)
self.tableTaskActive.set_col_spacings(5)
createLabel(self.tableTaskActive, text="Active Task Background ID", gridX=0, gridY=0, xPadding=10)
self.taskActiveBg = createComboBox(self.tableTaskActive, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=0, handler=self.changeOccurred)
self.registerComponent("task_active_background_id", self.taskActiveBg)
createLabel(self.tableTaskActive, text="Note: Default values of 0 for each of these settings leaves icons unchanged!", gridX=0, gridY=1, sizeX=3, xPadding=10)
createLabel(self.tableTaskActive, text="Active Icon Alpha (0 to 100)", gridX=0, gridY=2, xPadding=10)
self.activeIconHue = createEntry(self.tableTaskActive, maxSize=6, width=8, text=ACTIVE_ICON_ALPHA, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskActive, text="Active Icon Saturation (-100 to 100)", gridX=0, gridY=3, xPadding=10)
self.activeIconSat = createEntry(self.tableTaskActive, maxSize=6, width=8, text=ACTIVE_ICON_SAT, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskActive, text="Active Icon Brightness (-100 to 100)", gridX=0, gridY=4, xPadding=10)
self.activeIconBri = createEntry(self.tableTaskActive, maxSize=6, width=8, text=ACTIVE_ICON_BRI, gridX=1, gridY=4, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_active_icon_asb", (self.activeIconHue, self.activeIconSat, self.activeIconBri))
createLabel(self.tableTaskActive, text="Active Font Color", gridX=0, gridY=5, xPadding=10)
self.fontActiveCol = createEntry(self.tableTaskActive, maxSize=7, width=9, text="", gridX=1, gridY=5, xExpand=True, yExpand=False, handler=None, name="fontActiveCol")
self.fontActiveCol.connect("activate", self.colorTyped)
self.fontActiveColButton = createColorButton(self.tableTaskActive, color=self.defaults["fgColor"], useAlpha=True, name="fontActiveCol", gridX=2, gridY=5, handler=self.colorChange)
self.fontActiveCol.set_text(self.defaults["fgColor"])
# Add this AFTER we set color to avoid "changed" event
self.fontActiveCol.connect("changed", self.changeOccurred)
self.registerComponent("task_active_font_color", (self.fontActiveCol, self.fontActiveColButton))
def createUrgentTasksWidgets(self):
"""Create the Urgent Tasks widgets."""
self.tableTaskUrgent = gtk.Table(rows=6, columns=3, homogeneous=False)
self.tableTaskUrgent.set_row_spacings(5)
self.tableTaskUrgent.set_col_spacings(5)
createLabel(self.tableTaskUrgent, text="Urgent Task Background ID", gridX=0, gridY=0, xPadding=10)
self.taskUrgentBg = createComboBox(self.tableTaskUrgent, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=0, handler=self.changeOccurred)
self.registerComponent("task_urgent_background_id", self.taskUrgentBg)
createLabel(self.tableTaskUrgent, text="Note: Default values of 0 for each of these settings leaves icons unchanged!", gridX=0, gridY=1, sizeX=3, xPadding=10)
createLabel(self.tableTaskUrgent, text="Urgent Icon Alpha (0 to 100)", gridX=0, gridY=2, xPadding=10)
self.urgentIconHue = createEntry(self.tableTaskUrgent, maxSize=6, width=8, text=URGENT_ICON_ALPHA, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskUrgent, text="Urgent Icon Saturation (-100 to 100)", gridX=0, gridY=3, xPadding=10)
self.urgentIconSat = createEntry(self.tableTaskUrgent, maxSize=6, width=8, text=URGENT_ICON_SAT, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskUrgent, text="Urgent Icon Brightness (-100 to 100)", gridX=0, gridY=4, xPadding=10)
self.urgentIconBri = createEntry(self.tableTaskUrgent, maxSize=6, width=8, text=URGENT_ICON_BRI, gridX=1, gridY=4, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_urgent_icon_asb", (self.urgentIconHue, self.urgentIconSat, self.urgentIconBri))
createLabel(self.tableTaskUrgent, text="Urgent Font Color", gridX=0, gridY=5, xPadding=10)
self.fontUrgentCol = createEntry(self.tableTaskUrgent, maxSize=7, width=9, text="", gridX=1, gridY=5, xExpand=True, yExpand=False, handler=None, name="fontUrgentCol")
self.fontUrgentCol.connect("activate", self.colorTyped)
self.fontUrgentColButton = createColorButton(self.tableTaskUrgent, color=self.defaults["fgColor"], useAlpha=True, name="fontUrgentCol", gridX=2, gridY=5, handler=self.colorChange)
self.fontUrgentCol.set_text(self.defaults["fgColor"])
# Add this AFTER we set color to avoid "changed" event
self.fontUrgentCol.connect("changed", self.changeOccurred)
self.registerComponent("task_urgent_font_color", (self.fontUrgentCol, self.fontUrgentColButton))
def createIconifiedTasksWidgets(self):
"""Create the Iconified Tasks widgets."""
self.tableTaskIconified = gtk.Table(rows=6, columns=3, homogeneous=False)
self.tableTaskIconified.set_row_spacings(5)
self.tableTaskIconified.set_col_spacings(5)
createLabel(self.tableTaskIconified, text="Iconified Task Background ID", gridX=0, gridY=0, xPadding=10)
self.taskIconifiedBg = createComboBox(self.tableTaskIconified, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=0, handler=self.changeOccurred)
self.registerComponent("task_iconified_background_id", self.taskIconifiedBg)
createLabel(self.tableTaskIconified, text="Note: Default values of 0 for each of these settings leaves icons unchanged!", gridX=0, gridY=1, sizeX=3, xPadding=10)
createLabel(self.tableTaskIconified, text="Iconified Icon Alpha (0 to 100)", gridX=0, gridY=2, xPadding=10)
self.iconifiedIconHue = createEntry(self.tableTaskIconified, maxSize=6, width=8, text=ICONIFIED_ICON_ALPHA, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskIconified, text="Iconified Icon Saturation (-100 to 100)", gridX=0, gridY=3, xPadding=10)
self.iconifiedIconSat = createEntry(self.tableTaskIconified, maxSize=6, width=8, text=ICONIFIED_ICON_SAT, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskIconified, text="Iconified Icon Brightness (-100 to 100)", gridX=0, gridY=4, xPadding=10)
self.iconifiedIconBri = createEntry(self.tableTaskIconified, maxSize=6, width=8, text=ICONIFIED_ICON_BRI, gridX=1, gridY=4, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_iconified_icon_asb", (self.iconifiedIconHue, self.iconifiedIconSat, self.iconifiedIconBri))
createLabel(self.tableTaskIconified, text="Iconified Font Color", gridX=0, gridY=5, xPadding=10)
self.fontIconifiedCol = createEntry(self.tableTaskIconified, maxSize=7, width=9, text="", gridX=1, gridY=5, xExpand=True, yExpand=False, handler=None, name="fontIconifiedCol")
self.fontIconifiedCol.connect("activate", self.colorTyped)
self.fontIconifiedColButton = createColorButton(self.tableTaskIconified, color=self.defaults["fgColor"], useAlpha=True, name="fontIconifiedCol", gridX=2, gridY=5, handler=self.colorChange)
self.fontIconifiedCol.set_text(self.defaults["fgColor"])
# Add this AFTER we set color to avoid "changed" event
self.fontIconifiedCol.connect("changed", self.changeOccurred)
self.registerComponent("task_iconified_font_color", (self.fontIconifiedCol, self.fontIconifiedColButton))
def createSystemTrayWidgets(self):
"""Create the System Tray widgets."""
self.tableTray = gtk.Table(rows=9, columns=3, homogeneous=False)
self.tableTray.set_row_spacings(5)
self.tableTray.set_col_spacings(5)
createLabel(self.tableTray, text="Show System Tray", gridX=0, gridY=0, xPadding=10)
self.trayShow = createCheckButton(self.tableTray, active=True, gridX=1, gridY=0, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("systray", self.trayShow)
createLabel(self.tableTray, text="Padding (x, y)", gridX=0, gridY=1, xPadding=10)
self.trayPadX = createEntry(self.tableTray, maxSize=6, width=8, text=TRAY_PADDING_X, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.trayPadY = createEntry(self.tableTray, maxSize=6, width=8, text=TRAY_PADDING_Y, gridX=2, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTray, text="Horizontal Spacing", gridX=0, gridY=2, xPadding=10)
self.traySpacing = createEntry(self.tableTray, maxSize=6, width=8, text=TRAY_SPACING, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("systray_padding", (self.trayPadX, self.trayPadY, self.traySpacing))
createLabel(self.tableTray, text="System Tray Background ID", gridX=0, gridY=3, xPadding=10)
self.trayBg = createComboBox(self.tableTray, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=3, handler=self.changeOccurred)
self.registerComponent("systray_background_id", self.trayBg)
createLabel(self.tableTray, text="Icon Ordering", gridX=0, gridY=4, xPadding=10)
self.trayOrder = createComboBox(self.tableTray, ["ascending", "descending", "left2right", "right2left"], gridX=1, gridY=4, handler=self.changeOccurred)
self.registerComponent("systray_sort", self.trayOrder)
createLabel(self.tableTray, text="Maximum Icon Size (0 for automatic size)", gridX=0, gridY=5, xPadding=10)
self.trayMaxIconSize = createEntry(self.tableTray, maxSize=6, width=8, text=TRAY_MAX_ICON_SIZE, gridX=1, gridY=5, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("systray_icon_size", self.trayMaxIconSize)
createLabel(self.tableTray, text="System Tray Icon Alpha (0 to 100)", gridX=0, gridY=6, xPadding=10)
self.trayIconHue = createEntry(self.tableTray, maxSize=6, width=8, text=TRAY_ICON_ALPHA, gridX=1, gridY=6, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTray, text="System Tray Icon Saturation (-100 to 100)", gridX=0, gridY=7, xPadding=10)
self.trayIconSat = createEntry(self.tableTray, maxSize=6, width=8, text=TRAY_ICON_SAT, gridX=1, gridY=7, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTray, text="System Tray Icon Brightness (-100 to 100)", gridX=0, gridY=8, xPadding=10)
self.trayIconBri = createEntry(self.tableTray, maxSize=6, width=8, text=TRAY_ICON_BRI, gridX=1, gridY=8, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("systray_icon_asb", (self.trayIconHue, self.trayIconSat, self.trayIconBri))
def createClockDisplayWidgets(self):
"""Create the Clock Display widgets."""
self.tableClockDisplays = gtk.Table(rows=3, columns=3, homogeneous=False)
self.tableClockDisplays.set_row_spacings(5)
self.tableClockDisplays.set_col_spacings(5)
createLabel(self.tableClockDisplays, text="Show", gridX=0, gridY=0, xPadding=10)
self.clockCheckButton = createCheckButton(self.tableClockDisplays, active=True, gridX=1, gridY=0, xExpand=True, yExpand=False, handler=self.changeOccurred)
createLabel(self.tableClockDisplays, text="Time 1 Format", gridX=0, gridY=1, xPadding=10)
self.clock1Format = createEntry(self.tableClockDisplays, maxSize=50, width=20, text=CLOCK_FMT_1, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.clock1CheckButton = createCheckButton(self.tableClockDisplays, text="Show", active=True, gridX=2, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("time1_format", self.clock1Format)
createLabel(self.tableClockDisplays, text="Time 1 Font", gridX=0, gridY=2, xPadding=10)
self.clock1FontButton = createFontButton(self.tableClockDisplays, font=self.defaults["font"], gridX=1, gridY=2, handler=self.changeOccurred)
self.registerComponent("time1_font", self.clock1FontButton)
createLabel(self.tableClockDisplays, text="Time 2 Format", gridX=0, gridY=3, xPadding=10)
self.clock2Format = createEntry(self.tableClockDisplays, maxSize=50, width=20, text=CLOCK_FMT_2, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.clock2CheckButton = createCheckButton(self.tableClockDisplays, text="Show", active=True, gridX=2, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("time2_format", self.clock2Format)
createLabel(self.tableClockDisplays, text="Time 2 Font", gridX=0, gridY=4, xPadding=10)
self.clock2FontButton = createFontButton(self.tableClockDisplays, font=self.defaults["font"], gridX=1, gridY=4, handler=self.changeOccurred)
self.registerComponent("time2_font", self.clock2FontButton)
createLabel(self.tableClockDisplays, text="Tooltip Format", gridX=0, gridY=5, xPadding=10)
self.clockTooltipFormat = createEntry(self.tableClockDisplays, maxSize=50, width=20, text=CLOCK_TOOLTIP, gridX=1, gridY=5, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.clockTooltipCheckButton = createCheckButton(self.tableClockDisplays, text="Show", active=True, gridX=2, gridY=5, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("clock_tooltip", self.clockTooltipFormat)
self.clockArea = gtk.ScrolledWindow()
self.clockBuf = gtk.TextBuffer()
self.clockTextView = gtk.TextView(self.clockBuf)
self.clockBuf.insert_at_cursor("%H 00-23 (24-hour) %I 01-12 (12-hour) %l 1-12 (12-hour) %M 00-59 (minutes)\n%S 00-59 (seconds) %P am/pm %b Jan-Dec %B January-December\n%a Sun-Sat %A Sunday-Saturday %d 01-31 (day) %e 1-31 (day)\n%y 2 digit year, e.g. 09 %Y 4 digit year, e.g. 2009")
self.clockTextView.set_editable(False)
self.clockArea.add_with_viewport(self.clockTextView)
self.tableClockDisplays.attach(self.clockArea, 0, 3, 6, 7, xpadding=10)
def createClockSettingsWidgets(self):
"""Create the Clock Settings widgets."""
self.tableClockSettings = gtk.Table(rows=3, columns=3, homogeneous=False)
self.tableClockSettings.set_row_spacings(5)
self.tableClockSettings.set_col_spacings(5)
createLabel(self.tableClockSettings, text="Clock Font Color", gridX=0, gridY=0, xPadding=10)
self.clockFontCol = createEntry(self.tableClockSettings, maxSize=7, width=9, text="", gridX=1, gridY=0, xExpand=True, yExpand=False, handler=None, name="clockFontCol")
self.clockFontCol.connect("activate", self.colorTyped)
self.clockFontColButton = createColorButton(self.tableClockSettings, color=self.defaults["fgColor"], useAlpha=True, name="clockFontCol", gridX=2, gridY=0, handler=self.colorChange)
self.clockFontCol.set_text(self.defaults["fgColor"])
# Add this AFTER we set color to avoid "changed" event
self.clockFontCol.connect("changed", self.changeOccurred)
self.registerComponent("clock_font_color", (self.clockFontCol, self.clockFontColButton))
createLabel(self.tableClockSettings, text="Padding (x, y)", gridX=0, gridY=1, xPadding=10)
self.clockPadX = createEntry(self.tableClockSettings, maxSize=6, width=8, text=CLOCK_PADDING_X, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.clockPadY = createEntry(self.tableClockSettings, maxSize=6, width=8, text=CLOCK_PADDING_Y, gridX=2, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("clock_padding", (self.clockPadX, self.clockPadY))
createLabel(self.tableClockSettings, text="Clock Background ID", gridX=0, gridY=2, xPadding=10)
self.clockBg = createComboBox(self.tableClockSettings, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=2, handler=self.changeOccurred)
self.registerComponent("clock_background_id", self.clockBg)
createLabel(self.tableClockSettings, text="Left Click Command", gridX=0, gridY=3, xPadding=10)
self.clockLClick = createEntry(self.tableClockSettings, maxSize=50, width=20, text=CLOCK_LCLICK, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("clock_lclick_command", self.clockLClick)
createLabel(self.tableClockSettings, text="Right Click Command", gridX=0, gridY=4, xPadding=10)
self.clockRClick = createEntry(self.tableClockSettings, maxSize=50, width=20, text=CLOCK_RCLICK, gridX=1, gridY=4, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("clock_rclick_command", self.clockRClick)
createLabel(self.tableClockSettings, text="Time 1 Timezone", gridX=0, gridY=5, xPadding=10)
self.clockTime1Timezone = createEntry(self.tableClockSettings, maxSize=50, width=20, text=CLOCK_TIME1_TIMEZONE, gridX=1, gridY=5, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.clockTimezone1CheckButton = createCheckButton(self.tableClockSettings, text="Enable", active=False, gridX=2, gridY=5, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("time1_timezone", self.clockTime1Timezone)
createLabel(self.tableClockSettings, text="Time 2 Timezone", gridX=0, gridY=6, xPadding=10)
self.clockTime2Timezone = createEntry(self.tableClockSettings, maxSize=50, width=20, text=CLOCK_TIME2_TIMEZONE, gridX=1, gridY=6, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.clockTimezone2CheckButton = createCheckButton(self.tableClockSettings, text="Enable", active=False, gridX=2, gridY=6, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("time2_timezone", self.clockTime2Timezone)
createLabel(self.tableClockSettings, text="Tooltip Timezone", gridX=0, gridY=7, xPadding=10)
self.clockTooltipTimezone = createEntry(self.tableClockSettings, maxSize=50, width=20, text=CLOCK_TOOLTIP_TIMEZONE, gridX=1, gridY=7, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.clockTimezoneTooltipCheckButton = createCheckButton(self.tableClockSettings, text="Enable", active=False, gridX=2, gridY=7, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("clock_tooltip_timezone", self.clockTooltipTimezone)
def createMouseWidgets(self):
"""Creates the Mouse widgets."""
self.tableMouse = gtk.Table(rows=4, columns=3, homogeneous=False)
self.tableMouse.set_row_spacings(5)
self.tableMouse.set_col_spacings(5)
mouseCmds = ["none", "close", "toggle", "iconify", "shade", "toggle_iconify", "maximize_restore", "desktop_left", "desktop_right", "next_task", "prev_task"]
createLabel(self.tableMouse, text="Middle Mouse Click Action", gridX=0, gridY=0, xPadding=10)
self.mouseMiddle = createComboBox(self.tableMouse, mouseCmds, gridX=1, gridY=0, handler=self.changeOccurred)
self.registerComponent("mouse_middle", self.mouseMiddle)
createLabel(self.tableMouse, text="Right Mouse Click Action", gridX=0, gridY=1, xPadding=10)
self.mouseRight = createComboBox(self.tableMouse, mouseCmds, gridX=1, gridY=1, handler=self.changeOccurred)
self.registerComponent("mouse_right", self.mouseRight)
createLabel(self.tableMouse, text="Mouse Wheel Scroll Up Action", gridX=0, gridY=2, xPadding=10)
self.mouseUp = createComboBox(self.tableMouse, mouseCmds, gridX=1, gridY=2, handler=self.changeOccurred)
self.registerComponent("mouse_scroll_up", self.mouseUp)
createLabel(self.tableMouse, text="Mouse Wheel Scroll Down Action", gridX=0, gridY=3, xPadding=10)
self.mouseDown = createComboBox(self.tableMouse, mouseCmds, gridX=1, gridY=3, handler=self.changeOccurred)
self.registerComponent("mouse_scroll_down", self.mouseDown)
def createTooltipsWidgets(self):
"""Creates the Tooltips widgets."""
self.tableTooltip = gtk.Table(rows=7, columns=3, homogeneous=False)
self.tableTooltip.set_row_spacings(5)
self.tableTooltip.set_col_spacings(5)
createLabel(self.tableTooltip, text="Show Tooltips", gridX=0, gridY=0, xPadding=10)
self.tooltipShow = createCheckButton(self.tableTooltip, active=False, gridX=1, gridY=0, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("tooltip", self.tooltipShow)
createLabel(self.tableTooltip, text="Padding (x, y)", gridX=0, gridY=1, xPadding=10)
self.tooltipPadX = createEntry(self.tableTooltip, maxSize=6, width=8, text=TOOLTIP_PADDING_X, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.tooltipPadY = createEntry(self.tableTooltip, maxSize=6, width=8, text=TOOLTIP_PADDING_Y, gridX=2, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("tooltip_padding", (self.tooltipPadX, self.tooltipPadY))
createLabel(self.tableTooltip, text="Tooltip Show Timeout (seconds)", gridX=0, gridY=2, xPadding=10)
self.tooltipShowTime = createEntry(self.tableTooltip, maxSize=6, width=8, text=TOOLTIP_SHOW_TIMEOUT, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("tooltip_show_timeout", self.tooltipShowTime)
createLabel(self.tableTooltip, text="Tooltip Hide Timeout (seconds)", gridX=0, gridY=3, xPadding=10)
self.tooltipHideTime = createEntry(self.tableTooltip, maxSize=6, width=8, text=TOOLTIP_HIDE_TIMEOUT, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("tooltip_hide_timeout", self.tooltipHideTime)
createLabel(self.tableTooltip, text="Tooltip Background ID", gridX=0, gridY=4, xPadding=10)
self.tooltipBg = createComboBox(self.tableTooltip, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=4, handler=self.changeOccurred)
self.registerComponent("tooltip_background_id", self.tooltipBg)
createLabel(self.tableTooltip, text="Tooltip Font", gridX=0, gridY=5, xPadding=10)
self.tooltipFont = createFontButton(self.tableTooltip, font=self.defaults["font"], gridX=1, gridY=5, handler=self.changeOccurred)
self.registerComponent("tooltip_font", self.tooltipFont)
createLabel(self.tableTooltip, text="Tooltip Font Color", gridX=0, gridY=6, xPadding=10)
self.tooltipFontCol = createEntry(self.tableTooltip, maxSize=7, width=9, text="", gridX=1, gridY=6, xExpand=True, yExpand=False, handler=None, name="tooltipFontCol")
self.tooltipFontCol.connect("activate", self.colorTyped)
self.tooltipFontColButton = createColorButton(self.tableTooltip, color=self.defaults["fgColor"], useAlpha=True, name="tooltipFontCol", gridX=2, gridY=6, handler=self.colorChange)
self.tooltipFontCol.set_text(self.defaults["fgColor"])
# Add this AFTER we set color to avoid "changed" event
self.tooltipFontCol.connect("changed", self.changeOccurred)
self.registerComponent("tooltip_font_color", (self.tooltipFontCol, self.tooltipFontColButton))
def createBatteryWidgets(self):
"""Creates the Battery widgets."""
self.tableBattery = gtk.Table(rows=8, columns=3, homogeneous=False)
self.tableBattery.set_row_spacings(5)
self.tableBattery.set_col_spacings(5)
createLabel(self.tableBattery, text="Show Battery Applet", gridX=0, gridY=0, xPadding=10)
self.batteryCheckButton = createCheckButton(self.tableBattery, active=False, gridX=1, gridY=0, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("battery", self.batteryCheckButton)
createLabel(self.tableBattery, text="Battery Low Status (%)", gridX=0, gridY=1, xPadding=10)
self.batteryLow = createEntry(self.tableBattery, maxSize=6, width=8, text=BATTERY_LOW, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("battery_low_status", self.batteryLow)
createLabel(self.tableBattery, text="Battery Low Action", gridX=0, gridY=2, xPadding=10)
self.batteryLowAction = createEntry(self.tableBattery, maxSize=150, width=32, text=BATTERY_ACTION, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("battery_low_cmd", self.batteryLowAction)
createLabel(self.tableBattery, text="Battery Hide (0 to 100)", gridX=0, gridY=3, xPadding=10)
self.batteryHide = createEntry(self.tableBattery, maxSize=6, width=8, text=BATTERY_HIDE, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("battery_hide", self.batteryHide)
createLabel(self.tableBattery, text="Battery 1 Font", gridX=0, gridY=4, xPadding=10)
self.bat1FontButton = createFontButton(self.tableBattery, font=self.defaults["font"], gridX=1, gridY=4, handler=self.changeOccurred)
self.registerComponent("bat1_font", self.bat1FontButton)
createLabel(self.tableBattery, text="Battery 2 Font", gridX=0, gridY=5, xPadding=10)
self.bat2FontButton = createFontButton(self.tableBattery, font=self.defaults["font"], gridX=1, gridY=5, handler=self.changeOccurred)
self.registerComponent("bat2_font", self.bat2FontButton)
createLabel(self.tableBattery, text="Battery Font Color", gridX=0, gridY=6, xPadding=10)
self.batteryFontCol = createEntry(self.tableBattery, maxSize=7, width=9, text="", gridX=1, gridY=6, xExpand=True, yExpand=False, handler=None, name="batteryFontCol")
self.batteryFontCol.connect("activate", self.colorTyped)
self.batteryFontColButton = createColorButton(self.tableBattery, color=self.defaults["fgColor"], useAlpha=True, name="batteryFontCol", gridX=2, gridY=6, handler=self.colorChange)
self.batteryFontCol.set_text(self.defaults["fgColor"])
# Add this AFTER we set color to avoid "changed" event
self.batteryFontCol.connect("changed", self.changeOccurred)
self.registerComponent("battery_font_color", (self.batteryFontCol, self.batteryFontColButton))
createLabel(self.tableBattery, text="Padding (x, y)", gridX=0, gridY=7, xPadding=10)
self.batteryPadX = createEntry(self.tableBattery, maxSize=6, width=8, text=BATTERY_PADDING_X, gridX=1, gridY=7, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.batteryPadY = createEntry(self.tableBattery, maxSize=6, width=8, text=BATTERY_PADDING_Y, gridX=2, gridY=7, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("battery_padding", (self.batteryPadX, self.batteryPadY))
createLabel(self.tableBattery, text="Battery Background ID", gridX=0, gridY=8, xPadding=10)
self.batteryBg = createComboBox(self.tableBattery, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=8, handler=self.changeOccurred)
self.registerComponent("battery_background_id", self.batteryBg)
def registerComponent(self, configProperty, component):
"""Registers a component with a particular property from
a tint2 config. Note: a component may be a double or
triple if that property has more than one value associated
with it."""
self.propUI[configProperty] = component
def getComponent(self, configProperty):
"""Fetches the component associated with a tint2 property."""
return self.propUI[configProperty] if configProperty in self.propUI else None
def about(self, action=None):
"""Displays the About dialog."""
about = gtk.AboutDialog()
about.set_program_name(NAME)
about.set_version(VERSION)
about.set_authors(AUTHORS)
about.set_comments(COMMENTS)
about.set_website(WEBSITE)
gtk.about_dialog_set_url_hook(self.aboutLinkCallback)
about.run()
about.destroy()
def aboutLinkCallback(self, dialog, link, data=None):
"""Callback for when a URL is clicked in an About dialog."""
try:
webbrowser.open(link)
except Exception:
errorDialog(self, "Your default web-browser could not be opened.\nPlease visit %s" % link)
def addBg(self):
"""Adds a new background to the list of backgrounds."""
self.bgs += [gtk.Table(4, 3, False)]
createLabel(self.bgs[-1], text="Corner Rounding (px)", gridX=0, gridY=0, xPadding=10)
createEntry(self.bgs[-1], maxSize=7, width=9, text=BG_ROUNDING, gridX=1, gridY=0, xExpand=True, yExpand=False, handler=self.changeOccurred, name="rounded")
createLabel(self.bgs[-1], text="Background Color", gridX=0, gridY=1, xPadding=10)
temp = gtk.Entry(7)
temp.set_width_chars(9)
temp.set_name("bgColEntry")
temp.set_text(self.defaults["bgColor"])
temp.connect("changed", self.changeOccurred)
temp.connect("activate", self.colorTyped)
self.bgs[-1].attach(temp, 1, 2, 1, 2, xoptions=gtk.EXPAND)
temp = gtk.ColorButton(gtk.gdk.color_parse(self.defaults["bgColor"]))
temp.set_use_alpha(True)
temp.set_name("bgCol")
temp.connect("color-set", self.colorChange)
self.bgs[-1].attach(temp, 2, 3, 1, 2, xoptions=gtk.EXPAND, yoptions=gtk.EXPAND)
createLabel(self.bgs[-1], text="Border Width (px)", gridX=0, gridY=2, xPadding=10)
createEntry(self.bgs[-1], maxSize=7, width=9, text=BG_BORDER, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred, name="border")
createLabel(self.bgs[-1], text="Border Color", gridX=0, gridY=3, xPadding=10)
temp = gtk.Entry(7)
temp.set_width_chars(9)
temp.set_name("borderColEntry")
temp.connect("activate", self.colorTyped)
temp.set_text(self.defaults["borderColor"])
temp.connect("changed", self.changeOccurred)
self.bgs[-1].attach(temp, 1, 2, 3, 4, xoptions=gtk.EXPAND)
temp = gtk.ColorButton(gtk.gdk.color_parse(self.defaults["borderColor"]))
temp.set_use_alpha(True)
temp.set_name("borderCol")
temp.connect("color-set", self.colorChange)
self.bgs[-1].attach(temp, 2, 3, 3, 4, xoptions=gtk.EXPAND, yoptions=gtk.EXPAND)
# Note: Only set init to True when initialising background styles.
# This prevents unwanted calls to changeOccurred()
def addBgClick(self, widget=None, init=False):
"""Creates a new background and adds a new tab to the notebook."""
n = self.bgNotebook.get_n_pages()
if n > (self.defaults["bgCount"] + 2):
if confirmDialog(self, "You already have %d background styles. Are you sure you would like another?" % n) == gtk.RESPONSE_NO:
return
self.addBg()
newId = len(self.bgs)
self.bgNotebook.append_page(self.bgs[newId-1], gtk.Label("Background ID %d" % (newId)))
self.bgNotebook.show_all()
self.updateComboBoxes(n, "add")
self.bgNotebook.set_current_page(n)
if not init:
self.changeOccurred()
def addBgDefs(self, bgDefs):
"""Add interface elements for a list of background style definitions. bgDefs
should be a list containing dictionaries with the following keys: rounded,
border_width, background_color, border_color"""
for d in bgDefs:
self.addBg()
for child in self.bgs[-1].get_children():
if child.get_name() == "rounded":
child.set_text(d["rounded"])
elif child.get_name() == "border":
child.set_text(d["border_width"])
elif child.get_name() == "bgColEntry":
child.set_text(d["background_color"].split(" ")[0].strip())
child.activate()
elif child.get_name() == "borderColEntry":
child.set_text(d["border_color"].split(" ")[0].strip())
child.activate()
elif child.get_name() == "bgCol":
list = d["background_color"].split(" ")
if len(list) > 1:
child.set_alpha(int(int(list[1].strip()) * 65535 / 100.0))
else:
child.set_alpha(65535)
elif child.get_name() == "borderCol":
list = d["border_color"].split(" ")
if len(list) > 1:
child.set_alpha(int(int(list[1].strip()) * 65535 / 100.0))
else:
child.set_alpha(65535)
newId = len(self.bgs)
self.bgNotebook.append_page(self.bgs[newId-1], gtk.Label("Background ID %d" % (newId)))
self.bgNotebook.show_all()
self.updateComboBoxes(newId-1, "add")
self.bgNotebook.set_current_page(newId)
def apply(self, widget, event=None, confirmChange=True):
"""Applies the current config to tint2."""
# Check if tint2 is running
procs = os.popen('pgrep -x "tint2"') # Check list of active processes for tint2
pids = [] # List of process ids for tint2
for proc in procs.readlines():
pids += [int(proc.strip().split(" ")[0])]
procs.close()
if self.oneConfigFile:
# Save and copy as default
self.save()
tmpSrc = self.filename
tmpDest = os.path.expandvars("${HOME}") + "/.config/tint2/tint2rc"
try:
shutil.copyfile(tmpSrc, tmpDest)
except shutil.Error:
pass
# Ask tint2 to reload config
for pid in pids:
os.kill(pid, signal.SIGUSR1)
else:
if confirmDialog(self, "This will terminate all currently running instances of tint2 before applying config. Continue?") == gtk.RESPONSE_YES:
if not self.save():
return
#shutil.copyfile(self.filename, self.filename+".backup") # Create backup
# If it is - kill it
for pid in pids:
os.kill(pid, signal.SIGTERM)
# Lastly, start it
os.spawnv(os.P_NOWAIT, self.tint2Bin, [self.tint2Bin, "-c", self.filename])
if confirmChange and self.filename != (os.path.expandvars("${HOME}") + "/.config/tint2/tint2rc") and confirmDialog(self, "Use this as default tint2 config?") == gtk.RESPONSE_YES:
tmp = self.filename
self.filename = os.path.expandvars("${HOME}") + "/.config/tint2/tint2rc"
try:
shutil.copyfile(tmp, self.filename)
except shutil.Error:
pass
#if confirmChange and confirmDialog(self, "Keep this config?") == gtk.RESPONSE_NO:
# shutil.copyfile(self.filename+".backup", self.filename) # Create backup
# self.apply(widget, event, False)
def changeAllFonts(self, widget):
"""Changes all fonts at once."""
dialog = gtk.FontSelectionDialog("Select Font")
dialog.set_font_name(self.defaults["font"])
if dialog.run() == gtk.RESPONSE_OK:
newFont = dialog.get_font_name()
self.clock1FontButton.set_font_name(newFont)
self.clock2FontButton.set_font_name(newFont)
self.bat1FontButton.set_font_name(newFont)
self.bat2FontButton.set_font_name(newFont)
self.fontButton.set_font_name(newFont)
dialog.destroy()
self.generateConfig()
self.changeOccurred()
def changeDefaults(self, widget=None):
"""Shows the style preferences widget."""
TintWizardPrefGUI(self)
def changeOccurred(self, widget=None):
"""Called when the user changes something, i.e. entry value"""
self.toSave = True
self.updateStatusBar(change=True)
if widget == self.panelOrientation:
if self.panelOrientation.get_active_text() == "horizontal":
self.panelSizeLabel.set_text("Size (width, height)")
else:
self.panelSizeLabel.set_text("Size (height, width)")
def colorChange(self, widget):
"""Update the text entry when a color button is updated."""
r = widget.get_color().red
g = widget.get_color().green
b = widget.get_color().blue
label = self.getColorLabel(widget)
# No label found
if not label:
return
label.set_text(rgbToHex(r, g, b))
self.changeOccurred()
def colorTyped(self, widget):
"""Update the color button when a valid value is typed into the entry."""
s = widget.get_text()
# The color button associated with this widget.
colorButton = self.getColorButton(widget)
# Just a precautionary check - this situation should never arise.
if not colorButton:
#print "Error in colorTyped() -- unrecognised entry widget."
return
# If the entered value is invalid, set textbox to the current
# hex value of the associated color button.
buttonHex = self.getHexFromWidget(colorButton)
if len(s) != 7:
errorDialog(self, "Invalid color specification: [%s]" % s)
widget.set_text(buttonHex)
return
try:
col = gtk.gdk.Color(s)
except:
errorDialog(self, "Invalid color specification: [%s]" % s)
widget.set_text(buttonHex)
return
colorButton.set_color(col)
# Note: only set init to True when removing backgrounds for a new config
# This prevents unwanted calls to changeOccurred()
def delBgClick(self, widget=None, prompt=True, init=False):
"""Deletes the selected background after confirming with the user."""
selected = self.bgNotebook.get_current_page()
if selected == -1: # Nothing to remove
return
if prompt:
if confirmDialog(self, "Remove this background?") != gtk.RESPONSE_YES:
return
self.bgNotebook.remove_page(selected)
self.bgs.pop(selected)
for i in range(self.bgNotebook.get_n_pages()):
self.bgNotebook.set_tab_label_text(self.bgNotebook.get_nth_page(i), "Background ID %d" % (i+1))
self.bgNotebook.show_all()
self.updateComboBoxes(len(self.bgs) + 1, "remove")
if not init:
self.changeOccurred()
def generateConfig(self):
"""Reads values from each widget and generates a config."""
self.configBuf.delete(self.configBuf.get_start_iter(), self.configBuf.get_end_iter())
self.configBuf.insert(self.configBuf.get_end_iter(), "# Tint2 config file\n")
self.configBuf.insert(self.configBuf.get_end_iter(), "# Generated by tintwizard (http://code.google.com/p/tintwizard/)\n")
self.configBuf.insert(self.configBuf.get_end_iter(), "# For information on manually configuring tint2 see http://code.google.com/p/tint2/wiki/Configure\n\n")
if not self.oneConfigFile:
self.configBuf.insert(self.configBuf.get_end_iter(), "# To use this as default tint2 config: save as $HOME/.config/tint2/tint2rc\n\n")
self.configBuf.insert(self.configBuf.get_end_iter(), "# Background definitions\n")
for i in range(len(self.bgs)):
self.configBuf.insert(self.configBuf.get_end_iter(), "# ID %d\n" % (i + 1))
for child in self.bgs[i].get_children():
if child.get_name() == "rounded":
rounded = child.get_text() if child.get_text() else BG_ROUNDING
elif child.get_name() == "border":
borderW = child.get_text() if child.get_text() else BG_BORDER
elif child.get_name() == "bgCol":
bgCol = self.getHexFromWidget(child)
bgAlpha = int(child.get_alpha() / 65535.0 * 100)
elif child.get_name() == "borderCol":
borderCol = self.getHexFromWidget(child)
borderAlpha = int(child.get_alpha() / 65535.0 * 100)
self.configBuf.insert(self.configBuf.get_end_iter(), "rounded = %s\n" % (rounded))
self.configBuf.insert(self.configBuf.get_end_iter(), "border_width = %s\n" % (borderW))
self.configBuf.insert(self.configBuf.get_end_iter(), "background_color = %s %d\n" % (bgCol, bgAlpha))
self.configBuf.insert(self.configBuf.get_end_iter(), "border_color = %s %d\n\n" % (borderCol, borderAlpha))
self.configBuf.insert(self.configBuf.get_end_iter(), "# Panel\n")
self.configBuf.insert(self.configBuf.get_end_iter(), "panel_monitor = %s\n" % (self.panelMonitor.get_text() if self.panelMonitor.get_text() else PANEL_MONITOR))
self.configBuf.insert(self.configBuf.get_end_iter(), "panel_position = %s %s %s\n" % (self.panelPosY.get_active_text(), self.panelPosX.get_active_text(), self.panelOrientation.get_active_text()))
self.configBuf.insert(self.configBuf.get_end_iter(), "panel_size = %s %s\n" % (self.panelSizeX.get_text() if self.panelSizeX.get_text() else PANEL_SIZE_X,
self.panelSizeY.get_text() if self.panelSizeY.get_text() else PANEL_SIZE_Y))
self.configBuf.insert(self.configBuf.get_end_iter(), "panel_margin = %s %s\n" % (self.panelMarginX.get_text() if self.panelMarginX.get_text() else PANEL_MARGIN_X,
self.panelMarginY.get_text() if self.panelMarginY.get_text() else PANEL_MARGIN_Y))
self.configBuf.insert(self.configBuf.get_end_iter(), "panel_padding = %s %s %s\n" % (self.panelPadX.get_text() if self.panelPadX.get_text() else PANEL_PADDING_X,
self.panelPadY.get_text() if self.panelPadY.get_text() else PANEL_PADDING_Y,
self.panelSpacing.get_text() if self.panelSpacing.get_text() else TASKBAR_SPACING))
self.configBuf.insert(self.configBuf.get_end_iter(), "panel_dock = %s\n" % int(self.panelDock.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "wm_menu = %s\n" % int(self.panelMenu.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "panel_layer = %s\n" % (self.panelLayer.get_active_text()))
self.configBuf.insert(self.configBuf.get_end_iter(), "panel_background_id = %s\n" % (self.panelBg.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "\n# Panel Autohide\n")
self.configBuf.insert(self.configBuf.get_end_iter(), "autohide = %s\n" % int(self.panelAutohide.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "autohide_show_timeout = %s\n" % (self.panelAutohideShow.get_text() if self.panelAutohideShow.get_text() else PANEL_AUTOHIDE_SHOW))
self.configBuf.insert(self.configBuf.get_end_iter(), "autohide_hide_timeout = %s\n" % (self.panelAutohideHide.get_text() if self.panelAutohideHide.get_text() else PANEL_AUTOHIDE_HIDE))
self.configBuf.insert(self.configBuf.get_end_iter(), "autohide_height = %s\n" % (self.panelAutohideHeight.get_text() if self.panelAutohideHeight.get_text() else PANEL_AUTOHIDE_HEIGHT))
self.configBuf.insert(self.configBuf.get_end_iter(), "strut_policy = %s\n" % (self.panelAutohideStrut.get_active_text() if self.panelAutohideStrut.get_active_text() else PANEL_AUTOHIDE_STRUT))
self.configBuf.insert(self.configBuf.get_end_iter(), "\n# Taskbar\n")
self.configBuf.insert(self.configBuf.get_end_iter(), "taskbar_mode = %s\n" % (self.taskbarMode.get_active_text()))
self.configBuf.insert(self.configBuf.get_end_iter(), "taskbar_padding = %s %s %s\n" % (self.taskbarPadX.get_text() if self.taskbarPadX.get_text() else TASKBAR_PADDING_X,
self.taskbarPadY.get_text() if self.taskbarPadY.get_text() else TASKBAR_PADDING_Y,
self.taskbarSpacing.get_text() if self.taskbarSpacing.get_text() else TASK_SPACING))
self.configBuf.insert(self.configBuf.get_end_iter(), "taskbar_background_id = %s\n" % (self.taskbarBg.get_active()))
# Comment out the taskbar_active_background_id if user has "disabled" it
if self.taskbarActiveBgEnable.get_active() == 0:
self.configBuf.insert(self.configBuf.get_end_iter(), "#")
self.configBuf.insert(self.configBuf.get_end_iter(), "taskbar_active_background_id = %s\n" % (self.taskbarActiveBg.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "\n# Tasks\n")
self.configBuf.insert(self.configBuf.get_end_iter(), "urgent_nb_of_blink = %s\n" % (self.taskBlinks.get_text() if self.taskBlinks.get_text() else TASK_BLINKS))
self.configBuf.insert(self.configBuf.get_end_iter(), "task_icon = %s\n" % int(self.taskIconCheckButton.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "task_text = %s\n" % int(self.taskTextCheckButton.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "task_centered = %s\n" % int(self.taskCentreCheckButton.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "task_maximum_size = %s %s\n" % (self.taskMaxSizeX.get_text() if self.taskMaxSizeX.get_text() else TASK_MAXIMUM_SIZE_X, self.taskMaxSizeY.get_text() if self.taskMaxSizeY.get_text() else TASK_MAXIMUM_SIZE_Y))
self.configBuf.insert(self.configBuf.get_end_iter(), "task_padding = %s %s\n" % (self.taskPadX.get_text() if self.taskPadX.get_text() else TASK_PADDING_X,
self.taskPadY.get_text() if self.taskPadY.get_text() else TASK_PADDING_Y))
self.configBuf.insert(self.configBuf.get_end_iter(), "task_background_id = %s\n" % (self.taskBg.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "task_active_background_id = %s\n" % (self.taskActiveBg.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "task_urgent_background_id = %s\n" % (self.taskUrgentBg.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "task_iconified_background_id = %s\n" % (self.taskIconifiedBg.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "\n# Task Icons\n")
self.configBuf.insert(self.configBuf.get_end_iter(), "task_icon_asb = %s %s %s\n" % (self.iconHue.get_text() if self.iconHue.get_text() else ICON_ALPHA,
self.iconSat.get_text() if self.iconSat.get_text() else ICON_SAT,
self.iconBri.get_text() if self.iconBri.get_text() else ICON_BRI))
self.configBuf.insert(self.configBuf.get_end_iter(), "task_active_icon_asb = %s %s %s\n" % (self.activeIconHue.get_text() if self.activeIconHue.get_text() else ACTIVE_ICON_ALPHA,
self.activeIconSat.get_text() if self.activeIconSat.get_text() else ACTIVE_ICON_SAT,
self.activeIconBri.get_text() if self.activeIconBri.get_text() else ACTIVE_ICON_BRI))
self.configBuf.insert(self.configBuf.get_end_iter(), "task_urgent_icon_asb = %s %s %s\n" % (self.urgentIconHue.get_text() if self.urgentIconHue.get_text() else URGENT_ICON_ALPHA,
self.urgentIconSat.get_text() if self.urgentIconSat.get_text() else URGENT_ICON_SAT,
self.urgentIconBri.get_text() if self.urgentIconBri.get_text() else URGENT_ICON_BRI))
self.configBuf.insert(self.configBuf.get_end_iter(), "task_iconified_icon_asb = %s %s %s\n" % (self.iconifiedIconHue.get_text() if self.iconifiedIconHue.get_text() else ICONIFIED_ICON_ALPHA,
self.iconifiedIconSat.get_text() if self.iconifiedIconSat.get_text() else ICONIFIED_ICON_SAT,
self.iconifiedIconBri.get_text() if self.iconifiedIconBri.get_text() else ICONIFIED_ICON_BRI))
self.configBuf.insert(self.configBuf.get_end_iter(), "\n# Fonts\n")
self.configBuf.insert(self.configBuf.get_end_iter(), "task_font = %s\n" % (self.fontButton.get_font_name()))
self.configBuf.insert(self.configBuf.get_end_iter(), "task_font_color = %s %s\n" % (self.getHexFromWidget(self.fontColButton),
int(self.fontColButton.get_alpha() / 65535.0 * 100)))
self.configBuf.insert(self.configBuf.get_end_iter(), "task_active_font_color = %s %s\n" % (self.getHexFromWidget(self.fontActiveColButton),
int(self.fontActiveColButton.get_alpha() / 65535.0 * 100)))
self.configBuf.insert(self.configBuf.get_end_iter(), "task_urgent_font_color = %s %s\n" % (self.getHexFromWidget(self.fontUrgentColButton),
int(self.fontUrgentColButton.get_alpha() / 65535.0 * 100)))
self.configBuf.insert(self.configBuf.get_end_iter(), "task_iconified_font_color = %s %s\n" % (self.getHexFromWidget(self.fontIconifiedColButton),
int(self.fontIconifiedColButton.get_alpha() / 65535.0 * 100)))
self.configBuf.insert(self.configBuf.get_end_iter(), "font_shadow = %s\n" % int(self.fontShadowCheckButton.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "\n# System Tray\n")
self.configBuf.insert(self.configBuf.get_end_iter(), "systray = %s\n" % int(self.trayShow.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "systray_padding = %s %s %s\n" % (self.trayPadX.get_text() if self.trayPadX.get_text() else TRAY_PADDING_X,
self.trayPadY.get_text() if self.trayPadY.get_text() else TRAY_PADDING_Y,
self.traySpacing.get_text() if self.traySpacing.get_text() else TRAY_SPACING))
self.configBuf.insert(self.configBuf.get_end_iter(), "systray_sort = %s\n" % (self.trayOrder.get_active_text()))
self.configBuf.insert(self.configBuf.get_end_iter(), "systray_background_id = %s\n" % (self.trayBg.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "systray_icon_size = %s\n" % (self.trayMaxIconSize.get_text() if self.trayMaxIconSize.get_text() else TRAY_MAX_ICON_SIZE))
self.configBuf.insert(self.configBuf.get_end_iter(), "systray_icon_asb = %s %s %s\n" % (self.trayIconHue.get_text() if self.trayIconHue.get_text() else TRAY_ICON_ALPHA,
self.trayIconSat.get_text() if self.trayIconSat.get_text() else TRAY_ICON_SAT,
self.trayIconBri.get_text() if self.trayIconBri.get_text() else TRAY_ICON_BRI))
if self.clockCheckButton.get_active():
self.configBuf.insert(self.configBuf.get_end_iter(), "\n# Clock\n")
if self.clock1CheckButton.get_active():
self.configBuf.insert(self.configBuf.get_end_iter(), "time1_format = %s\n" % (self.clock1Format.get_text() if self.clock1Format.get_text() else CLOCK_FMT_1))
self.configBuf.insert(self.configBuf.get_end_iter(), "time1_font = %s\n" % (self.clock1FontButton.get_font_name()))
if self.clock2CheckButton.get_active():
self.configBuf.insert(self.configBuf.get_end_iter(), "time2_format = %s\n" % (self.clock2Format.get_text() if self.clock2Format.get_text() else CLOCK_FMT_2))
self.configBuf.insert(self.configBuf.get_end_iter(), "time2_font = %s\n" % (self.clock2FontButton.get_font_name()))
self.configBuf.insert(self.configBuf.get_end_iter(), "clock_font_color = %s %s\n" % (self.getHexFromWidget(self.clockFontColButton),
int(self.clockFontColButton.get_alpha() / 65535.0 * 100)))
if self.clockTooltipCheckButton.get_active():
self.configBuf.insert(self.configBuf.get_end_iter(), "clock_tooltip = %s\n" % (self.clockTooltipFormat.get_text() if self.clockTooltipFormat.get_text() else CLOCK_TOOLTIP))
self.configBuf.insert(self.configBuf.get_end_iter(), "clock_padding = %s %s\n" % (self.clockPadX.get_text() if self.clockPadX.get_text() else CLOCK_PADDING_X,
self.clockPadY.get_text() if self.clockPadY.get_text() else CLOCK_PADDING_Y))
self.configBuf.insert(self.configBuf.get_end_iter(), "clock_background_id = %s\n" % (self.clockBg.get_active()))
if self.clockLClick.get_text():
self.configBuf.insert(self.configBuf.get_end_iter(), "clock_lclick_command = %s\n" % (self.clockLClick.get_text()))
if self.clockRClick.get_text():
self.configBuf.insert(self.configBuf.get_end_iter(), "clock_rclick_command = %s\n" % (self.clockRClick.get_text()))
if self.clockTimezone1CheckButton.get_active():
self.configBuf.insert(self.configBuf.get_end_iter(), "time1_timezone = %s\n" % (self.clockTime1Timezone.get_text() if self.clockTime1Timezone.get_text() else CLOCK_TIME1_TIMEZONE))
if self.clockTimezone2CheckButton.get_active():
self.configBuf.insert(self.configBuf.get_end_iter(), "time2_timezone = %s\n" % (self.clockTime2Timezone.get_text() if self.clockTime2Timezone.get_text() else CLOCK_TIME2_TIMEZONE))
if self.clockTimezoneTooltipCheckButton.get_active():
self.configBuf.insert(self.configBuf.get_end_iter(), "clock_tooltip_timezone = %s\n" % (self.clockTooltipTimezone.get_text() if self.clockTooltipTimezone.get_text() else CLOCK_TOOLTIP_TIMEZONE))
self.configBuf.insert(self.configBuf.get_end_iter(), "\n# Tooltips\n")
self.configBuf.insert(self.configBuf.get_end_iter(), "tooltip = %s\n" % int(self.tooltipShow.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "tooltip_padding = %s %s\n" % (self.tooltipPadX.get_text() if self.tooltipPadX.get_text() else TOOLTIP_PADDING_Y,
self.tooltipPadY.get_text() if self.tooltipPadY.get_text() else TOOLTIP_PADDING_Y))
self.configBuf.insert(self.configBuf.get_end_iter(), "tooltip_show_timeout = %s\n" % (self.tooltipShowTime.get_text() if self.tooltipShowTime.get_text() else TOOLTIP_SHOW_TIMEOUT))
self.configBuf.insert(self.configBuf.get_end_iter(), "tooltip_hide_timeout = %s\n" % (self.tooltipHideTime.get_text() if self.tooltipHideTime.get_text() else TOOLTIP_HIDE_TIMEOUT))
self.configBuf.insert(self.configBuf.get_end_iter(), "tooltip_background_id = %s\n" % (self.tooltipBg.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "tooltip_font = %s\n" % (self.tooltipFont.get_font_name()))
self.configBuf.insert(self.configBuf.get_end_iter(), "tooltip_font_color = %s %s\n" % (self.getHexFromWidget(self.tooltipFontColButton),
int(self.tooltipFontColButton.get_alpha() / 65535.0 * 100)))
self.configBuf.insert(self.configBuf.get_end_iter(), "\n# Mouse\n")
self.configBuf.insert(self.configBuf.get_end_iter(), "mouse_middle = %s\n" % (self.mouseMiddle.get_active_text()))
self.configBuf.insert(self.configBuf.get_end_iter(), "mouse_right = %s\n" % (self.mouseRight.get_active_text()))
self.configBuf.insert(self.configBuf.get_end_iter(), "mouse_scroll_up = %s\n" % (self.mouseUp.get_active_text()))
self.configBuf.insert(self.configBuf.get_end_iter(), "mouse_scroll_down = %s\n" % (self.mouseDown.get_active_text()))
self.configBuf.insert(self.configBuf.get_end_iter(), "\n# Battery\n")
self.configBuf.insert(self.configBuf.get_end_iter(), "battery = %s\n" % int(self.batteryCheckButton.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "battery_low_status = %s\n" % (self.batteryLow.get_text() if self.batteryLow.get_text() else BATTERY_LOW))
self.configBuf.insert(self.configBuf.get_end_iter(), "battery_low_cmd = %s\n" % (self.batteryLowAction.get_text() if self.batteryLowAction.get_text() else BATTERY_ACTION))
self.configBuf.insert(self.configBuf.get_end_iter(), "battery_hide = %s\n" % (self.batteryHide.get_text() if self.batteryHide.get_text() else BATTERY_HIDE))
self.configBuf.insert(self.configBuf.get_end_iter(), "bat1_font = %s\n" % (self.bat1FontButton.get_font_name()))
self.configBuf.insert(self.configBuf.get_end_iter(), "bat2_font = %s\n" % (self.bat2FontButton.get_font_name()))
self.configBuf.insert(self.configBuf.get_end_iter(), "battery_font_color = %s %s\n" % (self.getHexFromWidget(self.batteryFontColButton),
int(self.batteryFontColButton.get_alpha() / 65535.0 * 100)))
self.configBuf.insert(self.configBuf.get_end_iter(), "battery_padding = %s %s\n" % (self.batteryPadX.get_text() if self.batteryPadX.get_text() else BATTERY_PADDING_Y,
self.batteryPadY.get_text() if self.batteryPadY.get_text() else BATTERY_PADDING_Y))
self.configBuf.insert(self.configBuf.get_end_iter(), "battery_background_id = %s\n" % (self.batteryBg.get_active()))
self.configBuf.insert(self.configBuf.get_end_iter(), "\n# End of config")
def getColorButton(self, widget):
"""Returns the color button associated with widget."""
if widget.get_name() == "fontCol":
return self.fontColButton
elif widget.get_name() == "fontActiveCol":
return self.fontActiveColButton
elif widget.get_name() == "fontUrgentCol":
return self.fontUrgentColButton
elif widget.get_name() == "fontIconifiedCol":
return self.fontIconifiedColButton
elif widget.get_name() == "clockFontCol":
return self.clockFontColButton
elif widget.get_name() == "batteryFontCol":
return self.batteryFontColButton
elif widget.get_name() == "tooltipFontCol":
return self.tooltipFontColButton
elif widget.get_name() == "bgColEntry":
bgID = self.bgNotebook.get_current_page()
for child in self.bgs[bgID].get_children():
if child.get_name() == "bgCol":
return child
elif widget.get_name() == "borderColEntry":
bgID = self.bgNotebook.get_current_page()
for child in self.bgs[bgID].get_children():
if child.get_name() == "borderCol":
return child
# No button found which matches label
return None
def getColorLabel(self, widget):
"""Gets the color label associated with a color button."""
if widget.get_name() == "fontCol":
return self.fontCol
elif widget.get_name() == "fontActiveCol":
return self.fontActiveCol
elif widget.get_name() == "fontUrgentCol":
return self.fontUrgentCol
elif widget.get_name() == "fontIconifiedCol":
return self.fontIconifiedCol
elif widget.get_name() == "clockFontCol":
return self.clockFontCol
elif widget.get_name() == "batteryFontCol":
return self.batteryFontCol
elif widget.get_name() == "tooltipFontCol":
return self.tooltipFontCol
elif widget.get_name() == "bgCol":
bgID = self.bgNotebook.get_current_page()
for child in self.bgs[bgID].get_children():
if child.get_name() == "bgColEntry":
return child
elif widget.get_name() == "borderCol":
bgID = self.bgNotebook.get_current_page()
for child in self.bgs[bgID].get_children():
if child.get_name() == "borderColEntry":
return child
# No label found which matches color button
return None
def getHexFromWidget(self, widget):
"""Returns the #RRGGBB value of a widget."""
r = widget.get_color().red
g = widget.get_color().green
b = widget.get_color().blue
return rgbToHex(r, g, b)
def help(self, action=None):
"""Opens the Help wiki page in the default web browser."""
try:
webbrowser.open("http://code.google.com/p/tintwizard/wiki/Help")
except:
errorDialog(self, "Your default web-browser could not be opened.\nPlease visit http://code.google.com/p/tintwizard/wiki/Help")
def main(self):
"""Enters the main loop."""
gtk.main()
def new(self, action=None):
"""Prepares a new config."""
if self.toSave:
self.savePrompt()
self.toSave = True
self.filename = None
self.resetConfig()
self.generateConfig()
self.updateStatusBar("New Config File [*]")
def openDef(self, widget=None):
"""Opens the default tint2 config."""
self.openFile(default=True)
def openFile(self, widget=None, default=False):
"""Reads from a config file. If default=True, open the tint2 default config."""
self.new()
if not default:
chooser = gtk.FileChooserDialog("Open Config File", self, gtk.FILE_CHOOSER_ACTION_OPEN, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
chooser.set_default_response(gtk.RESPONSE_OK)
if self.curDir != None:
chooser.set_current_folder(self.curDir)
chooserFilter = gtk.FileFilter()
chooserFilter.set_name("All files")
chooserFilter.add_pattern("*")
chooser.add_filter(chooserFilter)
chooser.show()
response = chooser.run()
if response == gtk.RESPONSE_OK:
self.filename = chooser.get_filename()
self.curDir = os.path.dirname(self.filename)
else:
chooser.destroy()
return
chooser.destroy()
else:
self.filename = os.path.expandvars("$HOME/.config/tint2/tint2rc")
self.curDir = os.path.expandvars("$HOME/.config/tint2")
self.readTint2Config()
self.generateConfig()
self.updateStatusBar()
def parseBgs(self, string):
"""Parses the background definitions from a string."""
s = string.split("\n")
bgDefs = []
cur = -1
bgKeys = ["border_width", "background_color", "border_color"]
newString = ""
for line in s:
data = [token.strip() for token in line.split("=")]
if data[0] == "rounded": # It may be considered bad practice to
bgDefs += [{"rounded": data[1]}] # find each style definition with an
elif data[0] in bgKeys: # arbitrary value, but tint2 does the same.
bgDefs[cur][data[0]] = data[1] # This means that any existing configs must
else: # start with 'rounded'.
newString += "%s\n" % line
self.addBgDefs(bgDefs)
return newString
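# Illustrative sketch (not part of the original file): parseBgs() turns a
# hypothetical fragment such as
#   rounded = 4
#   border_width = 2
#   background_color = #2A2A2A 60
#   border_color = #FFFFFF 80
# into [{"rounded": "4", "border_width": "2",
#        "background_color": "#2A2A2A 60", "border_color": "#FFFFFF 80"}]
# and passes that list to addBgDefs(), which splits off the trailing opacity
# percentage when setting each color button's alpha.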
def parseConfig(self, string):
"""Parses the contents of a config file."""
for line in string.split("\n"):
s = line.split("=") # Create a list with KEY and VALUE
e = s[0].strip() # Strip whitespace from KEY
if e == "time1_format": # Set the VALUE of KEY
self.parseProp(self.getComponent(e), s[1], True, "time1")
elif e == "time2_format":
self.parseProp(self.getComponent(e), s[1], True, "time2")
elif e == "clock_tooltip":
self.parseProp(self.getComponent(e), s[1], True, "clock_tooltip")
elif e == "time1_timezone":
self.parseProp(self.getComponent(e), s[1], True, "time1_timezone")
elif e == "time2_timezone":
self.parseProp(self.getComponent(e), s[1], True, "time2_timezone")
elif e == "clock_tooltip_timezone":
self.parseProp(self.getComponent(e), s[1], True, "tooltip_timezone")
elif e == "systray_padding":
self.parseProp(self.getComponent(e), s[1], True, "tray")
elif e == "taskbar_active_background_id":
self.parseProp(self.getComponent(e), s[1], True, "activeBg")
else:
component = self.getComponent(e)
if component != None:
self.parseProp(self.getComponent(e), s[1])
def parseProp(self, prop, string, special=False, propType=""):
"""Parses a variable definition from the conf file and updates the correct UI widget."""
string = string.strip() # Remove whitespace from the VALUE
eType = type(prop) # Get widget type
if special: # 'Special' properties are those which are optional
if propType == "time1":
self.clockCheckButton.set_active(True)
self.clock1CheckButton.set_active(True)
elif propType == "time2":
self.clockCheckButton.set_active(True)
self.clock2CheckButton.set_active(True)
elif propType == "clock_tooltip":
self.clockCheckButton.set_active(True)
self.clockTooltipCheckButton.set_active(True)
elif propType == "time1_timezone":
self.clockTimezone1CheckButton.set_active(True)
elif propType == "time2_timezone":
self.clockTimezone2CheckButton.set_active(True)
elif propType == "tooltip_timezone":
self.clockTimezoneTooltipCheckButton.set_active(True)
elif propType == "tray":
self.trayShow.set_active(True)
elif propType == "activeBg":
self.taskbarActiveBgEnable.set_active(True)
if eType == gtk.Entry:
prop.set_text(string)
prop.activate()
elif eType == gtk.ComboBox:
# This allows us to select the correct combo-box value.
if string in ["bottom", "top", "left", "right", "center", "single_desktop", "multi_desktop", "single_monitor",
"none", "close", "shade", "iconify", "toggle", "toggle_iconify", "maximize_restore",
"desktop_left", "desktop_right", "horizontal", "vertical", "ascending", "descending",
"left2right", "right2left", "next_task", "prev_task", "minimum", "follow_size", "normal"]:
if string in ["bottom", "left", "single_desktop", "none", "horizontal", "ascending"]:
i = 0
elif string in ["top", "right", "multi_desktop", "close", "vertical", "descending", "minimum"]:
i = 1
elif string in ["center", "single_monitor", "toggle", "left2right", "follow_size", "normal"]:
i = 2
elif string in ["right2left"]:
i = 3
else:
i = ["none", "close", "toggle", "iconify", "shade", "toggle_iconify", "maximize_restore",
"desktop_left", "desktop_right", "next_task", "prev_task"].index(string)
prop.set_active(i)
else:
prop.set_active(int(string))
elif eType == gtk.CheckButton:
prop.set_active(bool(int(string)))
elif eType == gtk.FontButton:
prop.set_font_name(string)
elif eType == gtk.ColorButton:
prop.set_alpha(int(int(string) * 65535 / 100.0))
elif eType == tuple: # If a property has more than 1 value, for example the x and y co-ords
s = string.split(" ") # of the padding properties, then just we use recursion to set the
for i in range(len(prop)): # value of each associated widget.
if i >= len(s):
self.parseProp(prop[i], "0")
else:
self.parseProp(prop[i], s[i])
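# Illustrative sketch (not part of the original file): a multi-value property
# such as "panel_padding = 8 4 6" reaches parseProp() with a tuple of three
# entry widgets registered for that key, so the value is split on spaces and
# parseProp() recurses once per widget ("8", "4", "6"); missing values fall
# back to "0". For color buttons the stored opacity percentage is mapped back
# to GDK's 16-bit alpha range, e.g. int(int("60") * 65535 / 100.0) == 39321.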
def quit(self, widget, event=None):
"""Asks if user would like to save file before quitting, then quits the program."""
if self.toSave:
if self.oneConfigFile:
response = gtk.RESPONSE_YES
else:
dialog = gtk.Dialog("Save config?", self, gtk.DIALOG_MODAL, (gtk.STOCK_YES, gtk.RESPONSE_YES, gtk.STOCK_NO, gtk.RESPONSE_NO, gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
dialog.get_content_area().add(gtk.Label("Save config before quitting?"))
dialog.get_content_area().set_size_request(300, 100)
dialog.show_all()
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_CANCEL:
return True # Return True to stop it quitting when we hit "Cancel"
elif response == gtk.RESPONSE_NO:
gtk.main_quit()
elif response == gtk.RESPONSE_YES:
self.save()
gtk.main_quit()
else:
gtk.main_quit()
def readConf(self):
"""Reads the tintwizard configuration file - NOT tint2 config files."""
self.defaults = {"font": None, "bgColor": None, "fgColor": None, "borderColor": None, "bgCount": None}
if self.oneConfigFile:
# don't need tintwizard.conf
return
pathName = os.path.expandvars("${HOME}") + "/.config/tint2/"
if not os.path.exists(pathName + "tintwizard.conf"):
self.writeConf()
return
f = open(pathName + "tintwizard.conf", "r")
for line in f:
if "=" in line:
l = line.split("=")
if l[0].strip() in self.defaults:
self.defaults[l[0].strip()] = l[1].strip()
def readTint2Config(self):
"""Reads in from a config file."""
f = open(self.filename, "r")
string = ""
for line in f:
if (line[0] != "#") and (len(line) > 2):
string += line
f.close()
# Deselect the optional stuff, and we'll re-check them if the config has them enabled
self.clockCheckButton.set_active(False)
self.clock1CheckButton.set_active(False)
self.clock2CheckButton.set_active(False)
self.clockTooltipCheckButton.set_active(False)
self.clockTimezone1CheckButton.set_active(False)
self.clockTimezone2CheckButton.set_active(False)
self.clockTimezoneTooltipCheckButton.set_active(False)
self.trayShow.set_active(False)
self.taskbarActiveBgEnable.set_active(False)
# Remove all background styles so we can create new ones as we read them
for i in range(len(self.bgs)):
self.delBgClick(None, False)
# As we parse background definitions, we build a new string
# without the background related stuff. This means we don't
# have to read through background defs AGAIN when parsing
# the other stuff.
noBgDefs = self.parseBgs(string)
self.parseConfig(noBgDefs)
def reportBug(self, action=None):
"""Opens the bug report page in the default web browser."""
try:
webbrowser.open("http://code.google.com/p/tintwizard/issues/entry")
except:
errorDialog(self, "Your default web-browser could not be opened.\nPlease visit http://code.google.com/p/tintwizard/issues/entry")
def resetConfig(self):
"""Resets all the widgets to their default values."""
# Backgrounds
for i in range(len(self.bgs)):
self.delBgClick(prompt=False, init=True)
for i in range(self.defaults["bgCount"]):
self.addBgClick(init=True)
self.bgNotebook.set_current_page(0)
# Panel
self.panelPosY.set_active(0)
self.panelPosX.set_active(0)
self.panelOrientation.set_active(0)
self.panelSizeX.set_text(PANEL_SIZE_X)
self.panelSizeY.set_text(PANEL_SIZE_Y)
self.panelMarginX.set_text(PANEL_MARGIN_X)
self.panelMarginY.set_text(PANEL_MARGIN_Y)
self.panelPadX.set_text(PANEL_PADDING_X)
self.panelPadY.set_text(PANEL_PADDING_Y)
self.panelSpacing.set_text(TASKBAR_SPACING)
self.panelBg.set_active(0)
self.panelMenu.set_active(0)
self.panelDock.set_active(0)
self.panelLayer.set_active(0)
self.panelMonitor.set_text(PANEL_MONITOR)
self.panelAutohide.set_active(0)
self.panelAutohideShow.set_text(PANEL_AUTOHIDE_SHOW)
self.panelAutohideHide.set_text(PANEL_AUTOHIDE_HIDE)
self.panelAutohideHeight.set_text(PANEL_AUTOHIDE_HEIGHT)
self.panelAutohideStrut.set_active(0)
# Taskbar
self.taskbarMode.set_active(0)
self.taskbarPadX.set_text(TASKBAR_PADDING_X)
self.taskbarPadY.set_text(TASKBAR_PADDING_Y)
self.taskbarSpacing.set_text(TASK_SPACING)
self.taskbarBg.set_active(0)
self.taskbarActiveBg.set_active(0)
self.taskbarActiveBgEnable.set_active(0)
# Tasks
self.taskBlinks.set_text(TASK_BLINKS)
self.taskCentreCheckButton.set_active(True)
self.taskTextCheckButton.set_active(True)
self.taskIconCheckButton.set_active(True)
self.taskMaxSizeX.set_text(TASK_MAXIMUM_SIZE_X)
self.taskMaxSizeY.set_text(TASK_MAXIMUM_SIZE_Y)
self.taskPadX.set_text(TASK_PADDING_X)
self.taskPadY.set_text(TASK_PADDING_Y)
self.taskBg.set_active(0)
self.taskActiveBg.set_active(0)
self.taskUrgentBg.set_active(0)
self.taskIconifiedBg.set_active(0)
# Icons
self.iconHue.set_text(ICON_ALPHA)
self.iconSat.set_text(ICON_SAT)
self.iconBri.set_text(ICON_BRI)
self.activeIconHue.set_text(ACTIVE_ICON_ALPHA)
self.activeIconSat.set_text(ACTIVE_ICON_SAT)
self.activeIconBri.set_text(ACTIVE_ICON_BRI)
self.urgentIconHue.set_text(URGENT_ICON_ALPHA)
self.urgentIconSat.set_text(URGENT_ICON_SAT)
self.urgentIconBri.set_text(URGENT_ICON_BRI)
self.iconifiedIconHue.set_text(ICONIFIED_ICON_ALPHA)
self.iconifiedIconSat.set_text(ICONIFIED_ICON_SAT)
self.iconifiedIconBri.set_text(ICONIFIED_ICON_BRI)
# Fonts
self.fontButton.set_font_name(self.defaults["font"])
self.fontColButton.set_alpha(65535)
self.fontColButton.set_color(gtk.gdk.color_parse(self.defaults["fgColor"]))
self.fontCol.set_text(self.defaults["fgColor"])
self.fontActiveColButton.set_alpha(65535)
self.fontActiveColButton.set_color(gtk.gdk.color_parse(self.defaults["fgColor"]))
self.fontActiveCol.set_text(self.defaults["fgColor"])
self.fontUrgentColButton.set_alpha(65535)
self.fontUrgentColButton.set_color(gtk.gdk.color_parse(self.defaults["fgColor"]))
self.fontUrgentCol.set_text(self.defaults["fgColor"])
self.fontIconifiedColButton.set_alpha(65535)
self.fontIconifiedColButton.set_color(gtk.gdk.color_parse(self.defaults["fgColor"]))
self.fontIconifiedCol.set_text(self.defaults["fgColor"])
self.fontShadowCheckButton.set_active(False)
# System Tray
self.trayShow.set_active(True)
self.trayPadX.set_text(TRAY_PADDING_X)
self.trayPadY.set_text(TRAY_PADDING_Y)
self.traySpacing.set_text(TRAY_SPACING)
self.trayOrder.set_active(0)
self.trayBg.set_active(0)
self.trayMaxIconSize.set_text(TRAY_MAX_ICON_SIZE)
self.trayIconHue.set_text(TRAY_ICON_ALPHA)
self.trayIconSat.set_text(TRAY_ICON_SAT)
self.trayIconBri.set_text(TRAY_ICON_BRI)
# Clock
self.clockCheckButton.set_active(True)
self.clock1Format.set_text(CLOCK_FMT_1)
self.clock1CheckButton.set_active(True)
self.clock1FontButton.set_font_name(self.defaults["font"])
self.clock2Format.set_text(CLOCK_FMT_2)
self.clock2CheckButton.set_active(True)
self.clockTooltipFormat.set_text(CLOCK_TOOLTIP)
self.clockTooltipCheckButton.set_active(False)
self.clock2FontButton.set_font_name(self.defaults["font"])
self.clockFontColButton.set_alpha(65535)
self.clockFontColButton.set_color(gtk.gdk.color_parse(self.defaults["fgColor"]))
self.clockFontCol.set_text(self.defaults["fgColor"])
self.clockPadX.set_text(CLOCK_PADDING_X)
self.clockPadY.set_text(CLOCK_PADDING_Y)
self.clockBg.set_active(0)
self.clockLClick.set_text(CLOCK_LCLICK)
self.clockRClick.set_text(CLOCK_RCLICK)
self.clockTime1Timezone.set_text(CLOCK_TIME1_TIMEZONE)
self.clockTimezone1CheckButton.set_active(False)
self.clockTime2Timezone.set_text(CLOCK_TIME2_TIMEZONE)
self.clockTimezone2CheckButton.set_active(False)
self.clockTooltipTimezone.set_text(CLOCK_TOOLTIP_TIMEZONE)
self.clockTimezoneTooltipCheckButton.set_active(False)
# Tooltips
self.tooltipShow.set_active(False)
self.tooltipPadX.set_text(TOOLTIP_PADDING_X)
self.tooltipPadY.set_text(TOOLTIP_PADDING_Y)
self.tooltipShowTime.set_text(TOOLTIP_SHOW_TIMEOUT)
self.tooltipHideTime.set_text(TOOLTIP_HIDE_TIMEOUT)
self.tooltipBg.set_active(0)
self.tooltipFont.set_font_name(self.defaults["font"])
self.tooltipFontColButton.set_alpha(65535)
self.tooltipFontColButton.set_color(gtk.gdk.color_parse(self.defaults["fgColor"]))
self.tooltipFontCol.set_text(self.defaults["fgColor"])
# Mouse
self.mouseMiddle.set_active(0)
self.mouseRight.set_active(0)
self.mouseUp.set_active(0)
self.mouseDown.set_active(0)
# Battery
self.batteryCheckButton.set_active(False)
self.batteryLow.set_text(BATTERY_LOW)
self.batteryLowAction.set_text(BATTERY_ACTION)
self.batteryHide.set_text(BATTERY_HIDE)
self.bat1FontButton.set_font_name(self.defaults["font"])
self.bat2FontButton.set_font_name(self.defaults["font"])
self.batteryFontColButton.set_alpha(65535)
self.batteryFontColButton.set_color(gtk.gdk.color_parse(self.defaults["fgColor"]))
self.batteryFontCol.set_text(self.defaults["fgColor"])
self.batteryPadX.set_text(BATTERY_PADDING_Y)
self.batteryPadY.set_text(BATTERY_PADDING_Y)
self.batteryBg.set_active(0)
def save(self, widget=None, event=None):
"""Saves the generated config file."""
# This function returns the boolean status of whether or not the
# file saved, so that the apply() function knows if it should
# kill the tint2 process and apply the new config.
# If no file has been selected, force the user to "Save As..."
if self.filename == None:
return self.saveAs()
else:
self.generateConfig()
self.writeFile()
return True
def saveAs(self, widget=None, event=None):
"""Prompts the user to select a file and then saves the generated config file."""
self.generateConfig()
chooser = gtk.FileChooserDialog("Save Config File As...", self, gtk.FILE_CHOOSER_ACTION_SAVE, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK))
chooser.set_default_response(gtk.RESPONSE_OK)
if self.curDir != None:
chooser.set_current_folder(self.curDir)
chooserFilter = gtk.FileFilter()
chooserFilter.set_name("All files")
chooserFilter.add_pattern("*")
chooser.add_filter(chooserFilter)
chooser.show()
response = chooser.run()
if response == gtk.RESPONSE_OK:
self.filename = chooser.get_filename()
if os.path.exists(self.filename):
overwrite = confirmDialog(self, "This file already exists. Overwrite this file?")
if overwrite == gtk.RESPONSE_YES:
self.writeFile()
chooser.destroy()
return True
else:
self.filename = None
chooser.destroy()
return False
else:
self.writeFile()
chooser.destroy()
return True
else:
self.filename = None
chooser.destroy()
return False
def saveAsDef(self, widget=None, event=None):
"""Saves the config as the default tint2 config."""
if confirmDialog(self, "Overwrite current tint2 default config?") == gtk.RESPONSE_YES:
self.filename = os.path.expandvars("${HOME}") + "/.config/tint2/tint2rc"
self.curDir = os.path.expandvars("${HOME}") + "/.config/tint2"
# If, for whatever reason, tint2 has no default config - create one.
if not os.path.isfile(self.filename):
f = open(self.filename, "w")
f.write("# tint2rc")
f.close()
self.generateConfig()
self.writeFile()
return True
def savePrompt(self):
"""Prompt the user to save before creating a new file."""
if confirmDialog(self, "Save current config?") == gtk.RESPONSE_YES:
self.save(None)
def switchPage(self, notebook, page, page_num):
"""Handles notebook page switch events."""
# If user selects the 'View Config' tab, update the textarea within this tab.
if notebook.get_tab_label_text(notebook.get_nth_page(page_num)) == "View Config":
self.generateConfig()
def updateComboBoxes(self, i, action="add"):
"""Updates the contents of a combo box when a background style has been added/removed."""
cbs = [self.batteryBg, self.clockBg, self.taskbarBg, self.taskbarActiveBg, self.trayBg, self.taskActiveBg, self.taskBg, self.panelBg, self.tooltipBg, self.taskUrgentBg, self.taskIconifiedBg]
if action == "add":
for cb in cbs:
cb.append_text(str(i+1))
else:
for cb in cbs:
if cb.get_active() == i: # If background is selected, set to a different value
cb.set_active(0)
cb.remove_text(i)
def updateStatusBar(self, message="", change=False):
"""Updates the message on the statusbar. A message can be provided,
and if change is set to True (i.e. something has been modified) then
an appropriate symbol [*] is shown beside filename."""
contextID = self.statusBar.get_context_id("")
self.statusBar.pop(contextID)
if not message:
message = "%s %s" % (self.filename or "New Config File", "[*]" if change else "")
self.statusBar.push(contextID, message)
def writeConf(self):
"""Writes the tintwizard configuration file."""
confStr = "#Start\n[defaults]\n"
for key in self.defaults:
confStr += "%s = %s\n" % (key, str(self.defaults[key]))
confStr += "#End\n"
pathName = os.path.expandvars("${HOME}") + "/.config/tint2/"
f = open(pathName+"tintwizard.conf", "w")
f.write(confStr)
f.close()
def writeFile(self):
"""Writes the contents of the config text buffer to file."""
try:
f = open(self.filename, "w")
f.write(self.configBuf.get_text(self.configBuf.get_start_iter(), self.configBuf.get_end_iter()))
f.close()
self.toSave = False
self.curDir = os.path.dirname(self.filename)
self.updateStatusBar()
except IOError:
errorDialog(self, "Could not save file")
# General use functions
def createLabel(parent, text="", gridX=0, gridY=0, sizeX=1, sizeY=1, xPadding=0):
"""Creates a label and adds it to a parent widget."""
temp = gtk.Label(text)
temp.set_alignment(0, 0.5)
parent.attach(temp, gridX, gridX+sizeX, gridY, gridY+sizeY, xpadding=xPadding)
return temp
def createComboBox(parent, choices=["null"], active=0, gridX=0, gridY=0, sizeX=1, sizeY=1, xExpand=True, yExpand=True, handler=None):
"""Creates a combo box with text choices and adds it to a parent widget."""
temp = gtk.combo_box_new_text()
for choice in choices:
temp.append_text(choice)
temp.set_active(active)
if handler != None:
temp.connect("changed", handler)
parent.attach(temp, gridX, gridX+sizeX, gridY, gridY+sizeY, xoptions=gtk.EXPAND if xExpand else 0, yoptions=gtk.EXPAND if yExpand else 0)
return temp
def createEntry(parent, maxSize, width, text="", gridX=0, gridY=0, sizeX=1, sizeY=1, xExpand=True, yExpand=True, handler=None, name=""):
"""Creates a text entry widget and adds it to a parent widget."""
temp = gtk.Entry(maxSize)
temp.set_width_chars(width)
temp.set_text(text)
temp.set_name(name)
if handler != None:
temp.connect("changed", handler)
parent.attach(temp, gridX, gridX+sizeX, gridY, gridY+sizeY, xoptions=gtk.EXPAND if xExpand else 0, yoptions=gtk.EXPAND if yExpand else 0)
return temp
def createCheckButton(parent, text="", active=False, gridX=0, gridY=0, sizeX=1, sizeY=1, xExpand=True, yExpand=True, handler=None):
"""Creates a checkbox widget and adds it to a parent widget."""
temp = gtk.CheckButton(text if text != "" else None)
temp.set_active(active)
temp.connect("toggled", handler)
parent.attach(temp, gridX, gridX+sizeX, gridY, gridY+sizeY, xoptions=gtk.EXPAND if xExpand else 0, yoptions=gtk.EXPAND if yExpand else 0)
return temp
def createButton(parent, text="", stock=None, name="", gridX=0, gridY=0, sizeX=1, sizeY=1, xExpand=True, yExpand=True, handler=None):
"""Creates a button widget and adds it to a parent widget."""
if stock:
temp = gtk.Button(text, stock)
else:
temp = gtk.Button(text)
temp.set_name(name)
temp.connect("clicked", handler)
parent.attach(temp, gridX, gridX+sizeX, gridY, gridY+sizeY, xoptions=gtk.EXPAND if xExpand else 0, yoptions=gtk.EXPAND if yExpand else 0)
return temp
def createFontButton(parent, font, gridX=0, gridY=0, sizeX=1, sizeY=1, xExpand=True, yExpand=True, handler=None):
"""Creates a font button widget and adds it to a parent widget."""
temp = gtk.FontButton()
temp.set_font_name(font)
temp.connect("font-set", handler)
parent.attach(temp, gridX, gridX+sizeX, gridY, gridY+sizeY, xoptions=gtk.EXPAND if xExpand else 0, yoptions=gtk.EXPAND if yExpand else 0)
return temp
def createColorButton(parent, color="#000000", useAlpha=True, name="", gridX=0, gridY=0, sizeX=1, sizeY=1, xExpand=True, yExpand=True, handler=None):
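# Creates a color button widget and adds it to a parent widget.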
temp = gtk.ColorButton(gtk.gdk.color_parse(color))
temp.set_use_alpha(useAlpha)
temp.set_name(name)
temp.connect("color-set", handler)
parent.attach(temp, gridX, gridX+sizeX, gridY, gridY+sizeY, xoptions=gtk.EXPAND if xExpand else 0, yoptions=gtk.EXPAND if yExpand else 0)
return temp
def confirmDialog(parent, message):
"""Creates a confirmation dialog and returns the response."""
dialog = gtk.MessageDialog(parent, gtk.DIALOG_MODAL, gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, message)
dialog.show()
response = dialog.run()
dialog.destroy()
return response
def errorDialog(parent=None, message="An error has occured!"):
"""Creates an error dialog."""
dialog = gtk.MessageDialog(parent, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK, message)
dialog.show()
dialog.run()
dialog.destroy()
def numToHex(n):
"""Convert integer n in range [0, 15] to hex."""
try:
return ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F"][n]
except:
return -1
def rgbToHex(r, g, b):
"""Constructs a 6 digit hex representation of color (r, g, b)."""
r2 = trunc(r / 65535.0 * 255)
g2 = trunc(g / 65535.0 * 255)
b2 = trunc(b / 65535.0 * 255)
return "#%s%s%s%s%s%s" % (numToHex(r2 / 16), numToHex(r2 % 16), numToHex(g2 / 16), numToHex(g2 % 16), numToHex(b2 / 16), numToHex(b2 % 16))
def trunc(n):
"""Truncate a floating point number, rounding up or down appropriately."""
c = math.fabs(math.ceil(n) - n)
f = math.fabs(math.floor(n) - n)
if c < f:
return int(math.ceil(n))
else:
return int(math.floor(n))
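# Illustrative example (not part of the original file): GDK reports each color
# channel as a 16-bit value (0-65535); rgbToHex() scales it to 0-255 and emits
# two hex digits per channel, e.g. rgbToHex(65535, 0, 32768) -> "#FF0080".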
# Direct execution of application
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "-version":
print NAME, VERSION
exit()
tw = TintWizardGUI()
tw.main()
|
giresun28/malfs-milis
|
bin/tintwizard.py
|
Python
|
mit
| 110,544
|
[
"VisIt"
] |
ec7ec29dd6fe183b5bc8006d673944f8dbf32121de5c9ec0120474d2e1245a38
|
"""Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of ``Gaussian Processes
for Machine Learning'' (GPML) by Rasmussen and Williams.
In addition to standard sklearn estimator API, GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method sample_y(X), which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method log_marginal_likelihood(theta), which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
alpha : float or array-like, optional (default: 1e-10)
Value added to the diagonal of the kernel matrix during fitting.
Larger values correspond to increased noise level in the observations
and reduce potential numerical issue during fitting. If an array is
passed, it must have the same number of entries as the data used for
fitting and is used as datapoint-dependent noise level. Note that this
is equivalent to adding a WhiteKernel with c=alpha. Allowing the noise
level to be specified directly as a parameter is mainly for convenience
and for consistency with Ridge.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
By default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y: boolean, optional (default: False)
Whether the target values y are normalized, i.e., the mean of the
observed target values becomes zero. This parameter should be set to
True if the target values' mean is expected to differ considerably from
zero. When enabled, the normalization effectively modifies the GP's
prior based on the data, which contradicts the likelihood principle;
normalization is thus disabled by default.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_: array-like, shape = (n_samples, [n_output_dims])
Target values in training data (also required for prediction)
kernel_: kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_: array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in X_train_
alpha_: array-like, shape = (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_: float
The log-marginal-likelihood of self.kernel_.theta
"""
def __init__(self, kernel=None, alpha=1e-10,
optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
normalize_y=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process regression model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples, [n_output_dims])
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
# Normalize target value
if self.normalize_y:
self.y_train_mean = np.mean(y, axis=0)
# demean y
y = y - self.y_train_mean
else:
self.y_train_mean = np.zeros(1)
if np.iterable(self.alpha) \
and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError("alpha must be a scalar or an array"
" with same number of entries as y.(%d != %d)"
% (self.alpha.shape[0], y.shape[0]))
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self.rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
self.L_ = cholesky(K, lower=True) # Line 2
self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3
return self
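# Illustrative usage (not part of the original module; X_train, y_train and
# X_test are assumed to be compatible numpy arrays):
#   gpr = GaussianProcessRegressor(kernel=C(1.0) * RBF(1.0), alpha=1e-6)
#   gpr.fit(X_train, y_train)
#   y_mean, y_std = gpr.predict(X_test, return_std=True)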
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
In addition to the mean of the predictive distribution, its standard
deviation (return_std=True) or covariance (return_cov=True) can also be
returned. Note that at most one of the two can be requested.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Query points where the GP is evaluated
return_std : bool, default: False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default: False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean
Returns
-------
y_mean : array, shape = (n_samples, [n_output_dims])
Mean of predictive distribution at query points
y_std : array, shape = (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when return_std is True.
y_cov : array, shape = (n_samples, n_samples), optional
Covariance of joint predictive distribution at query points.
Only returned when return_cov is True.
"""
if return_std and return_cov:
raise RuntimeError(
"Not returning standard deviation of predictions when "
"returning full covariance.")
X = check_array(X)
if not hasattr(self, "X_train_"): # Unfitted; predict based on GP prior
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = self.kernel(X)
return y_mean, y_cov
elif return_std:
y_var = self.kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star)
y_mean = self.y_train_mean + y_mean # undo normalization
if return_cov:
v = cho_solve((self.L_, True), K_trans.T) # Line 5
y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6
return y_mean, y_cov
elif return_std:
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
K_inv = L_inv.dot(L_inv.T)
# Compute variance of predictive distribution
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ki,kj,ij->k", K_trans, K_trans, K_inv)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn("Predicted variances smaller than 0. "
"Setting those variances to 0.")
y_var[y_var_negative] = 0.0
return y_mean, np.sqrt(y_var)
else:
return y_mean
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like, shape = (n_samples_X, n_features)
Query points where the GP samples are evaluated
n_samples : int, default: 1
The number of samples drawn from the Gaussian process
random_state : RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = \
[rng.multivariate_normal(y_mean[:, i], y_cov,
n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])]
y_samples = np.hstack(y_samples)
return y_samples
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of self.kernel_.theta is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) \
if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
"state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
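# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how the fit/predict/sample_y methods above are
# typically exercised through scikit-learn's public API. The kernel choice,
# toy data and hyperparameters below are assumptions made purely for
# illustration, not values taken from this module.
if __name__ == "__main__":
    import numpy as np
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import RBF

    X = np.linspace(0.0, 5.0, 20)[:, np.newaxis]    # toy training inputs
    y = np.sin(X).ravel()                           # toy training targets
    gpr = GaussianProcessRegressor(kernel=RBF(length_scale=1.0),
                                   n_restarts_optimizer=2,
                                   normalize_y=True)
    gpr.fit(X, y)                                   # runs the fit() shown above
    mean, std = gpr.predict(X, return_std=True)     # posterior mean and std
    samples = gpr.sample_y(X, n_samples=3)          # draws from the posterior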
|
jmschrei/scikit-learn
|
sklearn/gaussian_process/gpr.py
|
Python
|
bsd-3-clause
| 18,634
|
[
"Gaussian"
] |
176bb9396b57c2a1c4dd05098419c5fa8eb440ac03bd6ea86228ca22e30aff9a
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from auto_nag.bzcleaner import BzCleaner
class SurveySecurityBugs(BzCleaner):
def __init__(self):
super(SurveySecurityBugs, self).__init__()
self.changes_per_bug = {}
def description(self):
return "Submit survey to assignee of a security bug"
def get_bz_params(self, date):
params = {
# maybe we need more fields to do our changes (?)
"include_fields": ["assigned_to", "whiteboard"],
# find fixed bugs
"bug_status": "RESOLVED,VERIFIED",
"resolution": "FIXED",
# find bugs only in these products
"f5": "product",
"o5": "anywordssubstr",
"v5": "Core,DevTools,Firefox,GeckoView,NSPR,NSS,Toolkit,WebExtensions",
# bugs changed to RESOLVED in last month
"chfield": "bug_status",
"chfieldfrom": "-1m",
"chfieldto": "NOW",
"chfieldvalue": "RESOLVED",
# keywords has either sec-critical or sec-high
"f1": "keywords",
"o1": "anywords",
"v1": "sec-critical,sec-high",
# whiteboard does not have [sec-survey] (to avoid us asking twice)
"f2": "status_whiteboard",
"o2": "notsubstring",
"v2": "[sec-survey]",
# has at least one attachment (i.e., hopefully a patch)
"f3": "attachments.count",
"o3": "greaterthan",
"v3": "0",
# assigned to any of those we have agreed to help out
"f4": "assigned_to",
"o4": "anywords",
"v4": ",".join(self.get_config("to_reach_out", default=[])),
}
return params
def handle_bug(self, bug, data):
assignee = bug["assigned_to"]
bugid = str(bug["id"])
new_whiteboard = bug["whiteboard"] + "[sec-survey]"
self.changes_per_bug[bugid] = {
"comment": {"body": self.comment_tpl_for_bugid(bugid)},
"whiteboard": new_whiteboard,
"flags": [
{
"name": "needinfo",
"requestee": assignee,
"status": "?",
"new": "true",
}
],
}
return bug
def get_autofix_change(self):
return self.changes_per_bug
def comment_tpl_for_bugid(self, bugid):
URL = f"https://docs.google.com/forms/d/e/1FAIpQLSe9uRXuoMK6tRglbNL5fpXbun_oEb6_xC2zpuE_CKA_GUjrvA/viewform?usp=pp_url&entry.2124261401=https%3A%2F%2Fbugzilla.mozilla.org%2Fshow_bug.cgi%3Fid%3D{bugid}"
return f"As part of a security bug pattern analysis, we are requesting your help with a high level analysis of this bug. It is our hope to develop static analysis (or potentially runtime/dynamic analysis) in the future to identify classes of bugs.\n\nPlease visit [this google form]({URL}) to reply."
if __name__ == "__main__":
SurveySecurityBugs().run()
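# --- Illustrative sketch (not part of the original script) ---
# get_bz_params() above returns a plain dict of Bugzilla query parameters.
# The helper below only shows how such a dict could be rendered as a query
# string with the standard library; the base URL is a hypothetical example,
# and auto_nag itself performs the actual request through its own machinery.
def _demo_query_url(params, base="https://bugzilla.mozilla.org/rest/bug"):
    from urllib.parse import urlencode
    # doseq=True expands list values such as include_fields into repeated keys
    return "%s?%s" % (base, urlencode(params, doseq=True))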
|
mozilla/bztools
|
auto_nag/scripts/survey_sec_bugs.py
|
Python
|
bsd-3-clause
| 3,177
|
[
"VisIt"
] |
2e960419b8eae82e025da0ffc12a49f48d8bc7809a8312d1f1483026124742a0
|
# -*- coding: utf-8 -*-
# Copyright 2015-2016 LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import logging
from odoo import models, fields
from odoo.addons.connector.unit.mapper import mapping
from ..unit.backend_adapter import CarepointCRUDAdapter
from ..unit.mapper import CarepointImportMapper
from ..backend import carepoint
from ..unit.import_synchronizer import (DelayedBatchImporter,
CarepointImporter,
import_record,
)
from .fdb_pem_moe import FdbPemMoeAdapter
_logger = logging.getLogger(__name__)
class FdbPemMogc(models.Model):
_inherit = 'fdb.pem.mogc'
carepoint_bind_ids = fields.One2many(
comodel_name='carepoint.fdb.pem.mogc',
inverse_name='odoo_id',
string='Carepoint Bindings',
)
class CarepointFdbPemMogc(models.Model):
_name = 'carepoint.fdb.pem.mogc'
_inherit = 'carepoint.binding'
_inherits = {'fdb.pem.mogc': 'odoo_id'}
_description = 'Carepoint FdbPemMogc'
_cp_lib = 'fdb_pem_mogc' # Name of model in Carepoint lib (snake_case)
odoo_id = fields.Many2one(
string='FdbPemMogc',
comodel_name='fdb.pem.mogc',
required=True,
ondelete='restrict'
)
@carepoint
class FdbPemMogcAdapter(CarepointCRUDAdapter):
_model_name = 'carepoint.fdb.pem.mogc'
@carepoint
class FdbPemMogcBatchImporter(DelayedBatchImporter):
""" Import the Carepoint FdbPemMogcs.
For every product category in the list, a delayed job is created.
Import from a date
"""
_model_name = ['carepoint.fdb.pem.mogc']
@carepoint
class FdbPemMogcImportMapper(CarepointImportMapper):
_model_name = 'carepoint.fdb.pem.mogc'
direct = [
('update_yn', 'update_yn'),
('pemono', 'pemono'),
]
@mapping
def gcn_id(self, record):
binder = self.binder_for('carepoint.fdb.gcn')
gcn_id = binder.to_odoo(record['gcn_seqno'])
return {'gcn_id': gcn_id}
@mapping
def carepoint_id(self, record):
return {'carepoint_id': record['gcn_seqno']}
@carepoint
class FdbPemMogcImporter(CarepointImporter):
_model_name = ['carepoint.fdb.pem.mogc']
_base_mapper = FdbPemMogcImportMapper
def _import_dependencies(self):
""" Import depends for record """
record = self.carepoint_record
self._import_dependency(record['gcn_seqno'],
'carepoint.fdb.gcn')
def _after_import(self, binding):
pem_adapter = self.unit_for(
FdbPemMoeAdapter, model='carepoint.fdb.pem.moe',
)
record = self.carepoint_record
domain = {'pemono': record['pemono']}
attributes = ['pemono', 'pemono_sn']
for rec_id in pem_adapter.search_read(attributes, **domain):
import_record.delay(
self.session,
'carepoint.fdb.pem.moe',
self.backend_record.id,
'{0},{1}'.format(rec_id['pemono'], rec_id['pemono_sn']),
force=False,
)
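# --- Illustrative sketch (not part of the original module) ---
# A minimal, hedged example of the mapper pattern used above: a @mapping
# method turns one column of the Carepoint record into one Odoo field.
# The model name ('carepoint.fdb.example') and source column ('descr') are
# hypothetical; the @carepoint registration decorator is omitted on purpose
# so this sketch has no side effects on the backend registry.
class FdbExampleImportMapper(CarepointImportMapper):
    _model_name = 'carepoint.fdb.example'

    @mapping
    def name(self, record):
        # copy the hypothetical 'descr' column into the Odoo 'name' field
        return {'name': record['descr']}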
|
laslabs/odoo-connector-carepoint
|
connector_carepoint/models/fdb_pem_mogc.py
|
Python
|
agpl-3.0
| 3,143
|
[
"MOE"
] |
528f0069ccafa763abd36cd0254de0790d29014e976d9174264ce32d98e3810f
|
"""
Testing the FileCatalog logic
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import unittest
import mock
import DIRAC
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC import S_OK, S_ERROR
__RCSID__ = "$Id $"
current_module = sys.modules[__name__]
class GenericCatalog(object):
""" Dummy catalog """
def __init__(self, name, nb_read, nb_read_no_lfn, nb_write, nb_write_no_lfn):
self.w_method = []
self.r_method = []
self.no_lfn = []
self.name = name
self.__generateMethods("read", self.r_method, nb_read, nb_read_no_lfn)
self.__generateMethods("write", self.w_method, nb_write, nb_write_no_lfn)
def hasCatalogMethod(self, methName):
return methName in self.w_method or methName in self.r_method
def __generateMethods(self, mType, methodList, nb_method, nb_method_no_lfn):
""" Generates methods, read or write, and adds them to the appropriate list,
including no_lfn if needed.
The no_lfn methods are taken starting from the end.
:param mType: read or write
:param methodList: in which list to put the names
:param nb_method: number of methods
:param nb_method_no_lfn: number of these methods that should go in no_lfn
"""
no_lfn_start = nb_method - nb_method_no_lfn + 1
for catId in range(1, nb_method + 1):
mName = "%s%d" % (mType, catId)
methodList.append(mName)
if catId >= no_lfn_start:
self.no_lfn.append(mName)
def getInterfaceMethods(self):
return self.r_method, self.w_method, self.no_lfn
def generic(self, *args, **kwargs):
""" Returns a status depending on the input.
For a normal read or write method, it looks for the catalog
name in the LFN. If it is there, it looks at which status it is
supposed to return: an S_ERROR, or the LFN placed in the Failed dict.
"""
successful = {}
failed = {}
if self.call in self.no_lfn:
if not args:
return S_OK("yeah")
ret = args[0]
if self.name in ret:
return S_ERROR("%s.%s did not go well" % (self.name, self.call))
else:
return S_OK("yeah")
lfns = args[0]
for lfn in lfns:
lfnSplit = lfn.split('/')
try:
idName = lfnSplit.index(self.name)
retType = lfnSplit[idName + 1]
if retType == "Error":
return S_ERROR("%s.%s did not go well" % (self.name, self.call))
elif retType == "Failed":
failed[lfn] = "%s.%s failed for %s" % (self.name, self.call, lfn)
except ValueError:
successful[lfn] = "yeah"
return S_OK({'Successful': successful, 'Failed': failed})
def __getattr__(self, meth):
self.call = meth
return self.generic
def mock_fc_getSelectedCatalogs(self, desiredCatalogs):
""" Mock the getSelectedCatalogs method
The name of the catalog should contain the following info, separated by '_':
* the name of the catalog
* True or False if it is a Master
* True or False for Read
* True or False for Write
* nb of read op
* nb of read no lfn
* nb of write op
* nb of write no lfn
"""
for catalogDescription in desiredCatalogs:
name, master, read, write, nb_read, nb_read_no_lfn, nb_write, nb_write_no_lfn = catalogDescription.split('_')
master = eval(master)
read = eval(read)
write = eval(write)
nb_read = eval(nb_read)
nb_read_no_lfn = eval(nb_read_no_lfn)
nb_write = eval(nb_write)
nb_write_no_lfn = eval(nb_write_no_lfn)
obj = GenericCatalog(name, nb_read, nb_read_no_lfn, nb_write, nb_write_no_lfn)
if read:
self.readCatalogs.append((name, obj, master))
if write:
self.writeCatalogs.append((name, obj, master))
return S_OK()
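# --- Illustrative sketch (not part of the original tests) ---
# The helper below is only an example of how the underscore-encoded catalog
# description documented above splits into its eight fields; the real mock
# uses eval() on the individual pieces instead.
def _split_catalog_description(description):
    """ Split e.g. 'c1_True_True_True_5_2_2_0' into its components """
    name, master, read, write, nb_r, nb_r_no_lfn, nb_w, nb_w_no_lfn = description.split('_')
    return {'name': name,
            'master': master == 'True',
            'read': read == 'True',
            'write': write == 'True',
            'nb_read': int(nb_r),
            'nb_read_no_lfn': int(nb_r_no_lfn),
            'nb_write': int(nb_w),
            'nb_write_no_lfn': int(nb_w_no_lfn)}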
def mock_fc_getEligibleCatalogs(self):
""" We return an object that always returns True
if we ask whether an item is in it
"""
class mockList(object):
def __contains__(self, item):
return True
x = mockList()
return S_OK(x)
def writeList(count, reverse=None):
""" If reverse is none, returns write1, ...., write<count>
if reverse is set, returns a list with <count> elements backward from write<reverse>
"""
if reverse:
return ["write%s" % i for i in range(reverse, reverse - count, -1)]
return ["write%s" % i for i in range(1, count + 1)]
def readList(count, reverse=None):
""" If reverse is none, returns read1, ...., read<count>
if reverse is set, returns a list with <count> elements backward from read<reverse>
"""
if reverse:
return ["read%s" % i for i in range(reverse, reverse - count, -1)]
return ["read%s" % i for i in range(1, count + 1)]
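# Illustrative examples (not in the original source):
#   writeList(3)            -> ['write1', 'write2', 'write3']
#   readList(2, reverse=3)  -> ['read3', 'read2']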
class TestInitialization(unittest.TestCase):
""" Tests the logic of the init mechanism
"""
@mock.patch.object(
DIRAC.Resources.Catalog.FileCatalog.FileCatalog,
'_getSelectedCatalogs',
side_effect=mock_fc_getSelectedCatalogs,
autospec=True) # autospec is for the binding of the method...
@mock.patch.object(
DIRAC.Resources.Catalog.FileCatalog.FileCatalog,
'_getEligibleCatalogs',
side_effect=mock_fc_getEligibleCatalogs,
autospec=True) # autospec is for the binding of the method...
def test_01_init(self, mk_getSelectedCatalogs, mk_getEligibleCatalogs):
""" Check logic of init"""
# We should not be able to have 2 masters
twoMastersFc = FileCatalog(catalogs=['c1_True_True_True_5_2_2_0', 'c2_True_True_True_5_2_2_0'])
self.assertTrue(not twoMastersFc.isOK())
# One master should be ok
oneMasterFc = FileCatalog(catalogs=['c1_True_True_True_2_0_2_2', 'c2_False_True_True_3_1_4_2'])
self.assertTrue(oneMasterFc.isOK())
# With a master, the write method should be the method of the master
self.assertEqual(sorted(oneMasterFc.write_methods), writeList(2))
# The read methods and no_lfn should be from all catalogs
self.assertEqual(sorted(oneMasterFc.ro_methods), readList(3))
# The no_lfns methods are from everywhere
# write1 and write2 from c1
# write3, write4, read3 from c2
self.assertEqual(sorted(oneMasterFc.no_lfn_methods), sorted(readList(1, reverse=3) + writeList(4)))
# No master should be ok
noMasterFc = FileCatalog(catalogs=['c1_False_True_True_2_0_2_0', 'c2_False_True_True_3_0_4_0'])
self.assertTrue(noMasterFc.isOK())
# With no master, the write method should be from all catalogs
self.assertEqual(sorted(noMasterFc.write_methods), writeList(4))
# The read methods and no_lfn should be from all catalogs
self.assertEqual(sorted(noMasterFc.ro_methods), readList(3))
class TestWrite(unittest.TestCase):
""" Tests of the w_execute method"""
@mock.patch.object(
DIRAC.Resources.Catalog.FileCatalog.FileCatalog,
'_getSelectedCatalogs',
side_effect=mock_fc_getSelectedCatalogs,
autospec=True) # autospec is for the binding of the method...
@mock.patch.object(
DIRAC.Resources.Catalog.FileCatalog.FileCatalog,
'_getEligibleCatalogs',
side_effect=mock_fc_getEligibleCatalogs,
autospec=True) # autospec is for the binding of the method...
def test_01_Normal(self, mk_getSelectedCatalogs, mk_getEligibleCatalogs):
"""Test behavior with one master and only standard write methods"""
fc = FileCatalog(catalogs=['c1_True_True_True_2_0_2_0', 'c2_False_True_True_3_0_1_0'])
# Test a write method which is not in the master catalog
with self.assertRaises(AttributeError):
fc.write4('/lhcb/toto')
# Test a write method which works for everybody
lfn = '/lhcb/toto'
res = fc.write1(lfn)
self.assertTrue(res['OK'])
self.assertTrue(lfn in res['Value']['Successful'])
self.assertEqual(sorted(['c1', 'c2']), sorted(res['Value']['Successful'][lfn]))
self.assertTrue(not res['Value']['Failed'])
# Test a write method that only the master has
lfn = '/lhcb/toto'
res = fc.write2(lfn)
self.assertTrue(res['OK'])
self.assertTrue(lfn in res['Value']['Successful'])
self.assertEqual(['c1'], sorted(res['Value']['Successful'][lfn]))
self.assertTrue(not res['Value']['Failed'])
# Test a write method that makes an error for master
# We should get an error
lfn = '/lhcb/c1/Error'
res = fc.write1(lfn)
self.assertTrue(not res['OK'])
# Test a write method that fails for master
# The lfn should be in failed and only attempted for the master
lfn = '/lhcb/c1/Failed'
res = fc.write1(lfn)
self.assertTrue(res['OK'])
self.assertTrue(not res['Value']['Successful'])
self.assertEqual(['c1'], sorted(res['Value']['Failed'][lfn]))
# Test a write method that makes an error for non master
# The lfn should be in failed for non master and successful for the master
lfn = '/lhcb/c2/Error'
res = fc.write1(lfn)
self.assertTrue(res['OK'])
self.assertEqual(['c1'], sorted(res['Value']['Successful'][lfn]))
self.assertEqual(['c2'], sorted(res['Value']['Failed'][lfn]))
# Test a write method that fails for non master
# The lfn should be in failed for non master and successful for the master
lfn = '/lhcb/c2/Failed'
res = fc.write1(lfn)
self.assertTrue(res['OK'])
self.assertEqual(['c1'], sorted(res['Value']['Successful'][lfn]))
self.assertEqual(['c2'], sorted(res['Value']['Failed'][lfn]))
@mock.patch.object(
DIRAC.Resources.Catalog.FileCatalog.FileCatalog,
'_getSelectedCatalogs',
side_effect=mock_fc_getSelectedCatalogs,
autospec=True) # autospec is for the binding of the method...
@mock.patch.object(
DIRAC.Resources.Catalog.FileCatalog.FileCatalog,
'_getEligibleCatalogs',
side_effect=mock_fc_getEligibleCatalogs,
autospec=True) # autospec is for the binding of the method...
def test_02_condParser(self, mk_getSelectedCatalogs, mk_getEligibleCatalogs):
"""Test behavior of write methods when using FCConditionParser"""
fc = FileCatalog(catalogs=['c1_True_True_True_2_0_2_0', 'c2_False_True_True_3_0_1_0', 'c3_False_True_True_3_0_1_0'])
# No condition for c3, so it should always pass
fcConditions = {'c1': "Filename=find('c1_pass')",
'c2': "Filename=find('c2_pass')"}
# Everything pass everywhere
lfn1 = '/lhcb/c1_pass/c2_pass/lfn1'
lfn2 = '/lhcb/c1_pass/c2_pass/lfn2'
res = fc.write1([lfn1, lfn2],
fcConditions=fcConditions)
self.assertTrue(res['OK'])
self.assertEqual(sorted(res['Value']['Successful']), sorted([lfn1, lfn2]))
self.assertEqual(sorted(res['Value']['Successful'][lfn1]), sorted(['c1', 'c2', 'c3']))
self.assertEqual(sorted(res['Value']['Successful'][lfn2]), sorted(['c1', 'c2', 'c3']))
self.assertTrue(not res['Value']['Failed'])
# Everything pass for the master, only lfn2 for c2
lfn1 = '/lhcb/c1_pass/lfn1'
lfn2 = '/lhcb/c1_pass/c2_pass/lfn2'
res = fc.write1([lfn1, lfn2],
fcConditions=fcConditions)
self.assertTrue(res['OK'])
self.assertEqual(sorted(res['Value']['Successful']), sorted([lfn1, lfn2]))
self.assertEqual(sorted(res['Value']['Successful'][lfn1]), ['c1', 'c3'])
self.assertEqual(sorted(res['Value']['Successful'][lfn2]), sorted(['c1', 'c2', 'c3']))
self.assertTrue(not res['Value']['Failed'])
# One is not valid for the master, so we do nothing
lfn1 = '/lhcb/c2_pass/lfn1'
lfn2 = '/lhcb/c1_pass/c2_pass/lfn2'
res = fc.write1([lfn1, lfn2],
fcConditions=fcConditions)
self.assertTrue(not res['OK'])
@mock.patch.object(
DIRAC.Resources.Catalog.FileCatalog.FileCatalog,
'_getSelectedCatalogs',
side_effect=mock_fc_getSelectedCatalogs,
autospec=True) # autospec is for the binding of the method...
@mock.patch.object(
DIRAC.Resources.Catalog.FileCatalog.FileCatalog,
'_getEligibleCatalogs',
side_effect=mock_fc_getEligibleCatalogs,
autospec=True) # autospec is for the binding of the method...
def test_03_noLFN(self, mk_getSelectedCatalogs, mk_getEligibleCatalogs):
""" Test the no_lfn methods """
fc = FileCatalog(catalogs=['c1_True_True_True_2_0_2_1', 'c2_False_True_True_3_0_2_1'])
# all good
res = fc.write2("/lhcb/toto")
self.assertTrue(res['OK'])
self.assertEqual(res['Value'], 'yeah')
# Fail in the master
res = fc.write2("/lhcb/c1")
self.assertTrue(not res['OK'])
self.assertTrue('Value' not in res)
# Fail in the non master
res = fc.write2("/lhcb/c2")
self.assertTrue(res['OK'])
self.assertTrue('Value' in res)
self.assertEqual(res['Value'], 'yeah')
class TestRead(unittest.TestCase):
""" Tests of the w_execute method"""
@mock.patch.object(
DIRAC.Resources.Catalog.FileCatalog.FileCatalog,
'_getSelectedCatalogs',
side_effect=mock_fc_getSelectedCatalogs,
autospec=True) # autospec is for the binding of the method...
@mock.patch.object(
DIRAC.Resources.Catalog.FileCatalog.FileCatalog,
'_getEligibleCatalogs',
side_effect=mock_fc_getEligibleCatalogs,
autospec=True) # autospec is for the binding of the method...
def test_01_oneMasterNormal(self, mk_getSelectedCatalogs, mk_getEligibleCatalogs):
"""Test behavior with one master and only standard read methods"""
fc = FileCatalog(catalogs=['c1_True_True_True_2_0_2_0', 'c2_False_True_True_3_0_1_0'])
# Test a write method which is not in the master catalog
with self.assertRaises(AttributeError):
fc.write4('/lhcb/toto')
# Test a write method which works for everybody
lfn = '/lhcb/toto'
res = fc.write1(lfn)
self.assertTrue(res['OK'])
self.assertTrue(lfn in res['Value']['Successful'])
self.assertEqual(sorted(['c1', 'c2']), sorted(res['Value']['Successful'][lfn]))
self.assertTrue(not res['Value']['Failed'])
# Test a write method that only the master has
lfn = '/lhcb/toto'
res = fc.write2(lfn)
self.assertTrue(res['OK'])
self.assertTrue(lfn in res['Value']['Successful'])
self.assertEqual(['c1'], sorted(res['Value']['Successful'][lfn]))
self.assertTrue(not res['Value']['Failed'])
# Test a write method that makes an error for master
# We should get an error
lfn = '/lhcb/c1/Error'
res = fc.write1(lfn)
self.assertTrue(not res['OK'])
# Test a write method that fails for master
# The lfn should be in failed and only attempted for the master
lfn = '/lhcb/c1/Failed'
res = fc.write1(lfn)
self.assertTrue(res['OK'])
self.assertTrue(not res['Value']['Successful'])
self.assertEqual(['c1'], sorted(res['Value']['Failed'][lfn]))
# Test a write method that makes an error for non master
# The lfn should be in failed for non master and successful for the master
lfn = '/lhcb/c2/Error'
res = fc.write1(lfn)
self.assertTrue(res['OK'])
self.assertEqual(['c1'], sorted(res['Value']['Successful'][lfn]))
self.assertEqual(['c2'], sorted(res['Value']['Failed'][lfn]))
# Test a write method that fails for non master
# The lfn should be in failed for non master and successful for the master
lfn = '/lhcb/c2/Failed'
res = fc.write1(lfn)
self.assertTrue(res['OK'])
self.assertEqual(['c1'], sorted(res['Value']['Successful'][lfn]))
self.assertEqual(['c2'], sorted(res['Value']['Failed'][lfn]))
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestInitialization)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestWrite))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestRead))
unittest.TextTestRunner(verbosity=2).run(suite)
|
yujikato/DIRAC
|
src/DIRAC/Resources/Catalog/test/Test_FileCatalog.py
|
Python
|
gpl-3.0
| 15,775
|
[
"DIRAC"
] |
2f9465032ef51495eb87493f0abf359a6133d8ef38488b490031a7751da6a144
|
import pathlib
import numpy as np
import pytest
from util import full
from loprop.core import penalty_function, AU2ANG, pairs
from loprop.dalton import MolFragDalton
from . import h2o_beta_trans_data as ref
from .common import LoPropTestCase
thisdir = pathlib.Path(__file__).parent
case = "h2o_beta_trans"
tmpdir = thisdir / case / "tmp"
@pytest.fixture
def molfrag(request):
cls = request.param
return cls(tmpdir, freqs=(0.0,), pf=penalty_function(2.0 / AU2ANG ** 2))
@pytest.mark.parametrize("molfrag", [MolFragDalton], ids=["dalton"], indirect=True)
class TestNew(LoPropTestCase):
# def setup(self, molfrag):
# self.m = MolFrag(tmpdir, freqs=(0.0, ), pf=penalty_function(2.0/AU2ANG**2))
# self.maxDiff = None
#
# def teardown(self, molfrag):
# pass
def test_nuclear_charge(self, molfrag):
Z = molfrag.Z
self.assert_allclose(Z, ref.Z)
def test_coordinates_au(self, molfrag):
R = molfrag.R
self.assert_allclose(R, ref.R)
def test_default_gauge(self, molfrag):
self.assert_allclose(ref.Rc, molfrag.Rc)
def test_total_charge(self, molfrag):
Qtot = molfrag.Qab.sum()
self.assert_allclose(Qtot, ref.Qtot)
def test_charge(self, molfrag):
Qaa = molfrag.Qab.diagonal()
self.assert_allclose(ref.Q, Qaa)
def test_total_dipole(self, molfrag):
# molecular dipole moment wrt gauge center gc
Dtot = molfrag.Dab.sum(axis=2).sum(axis=1).view(full.matrix)
Qa = molfrag.Qab.diagonal()
Q = Qa.sum()
Dtot += Qa @ molfrag.R - Q * molfrag.Rc
self.assert_allclose(Dtot, ref.Dtot)
def test_dipole_allbonds(self, molfrag):
D = full.matrix(ref.D.shape)
Dab = molfrag.Dab
for ab, a, b in pairs(molfrag.noa):
D[:, ab] += Dab[:, a, b]
if a != b:
D[:, ab] += Dab[:, b, a]
self.assert_allclose(D, ref.D)
def test_dipole_allbonds_sym(self, molfrag):
Dsym = molfrag.Dsym
self.assert_allclose(Dsym, ref.D)
def test_dipole_nobonds(self, molfrag):
Daa = molfrag.Dab.sum(axis=2).view(full.matrix)
self.assert_allclose(Daa, ref.Daa)
def test_quadrupole_total(self, molfrag):
QUc = molfrag.QUc
self.assert_allclose(QUc, ref.QUc)
def test_nuclear_quadrupole(self, molfrag):
QUN = molfrag.QUN
self.assert_allclose(QUN, ref.QUN)
def test_quadrupole_allbonds(self, molfrag):
QU = full.matrix(ref.QU.shape)
QUab = molfrag.QUab
for ab, a, b in pairs(molfrag.noa):
QU[:, ab] += QUab[:, a, b]
if a != b:
QU[:, ab] += QUab[:, b, a]
self.assert_allclose(QU, ref.QU)
def test_quadrupole_allbonds_sym(self, molfrag):
QUsym = molfrag.QUsym
self.assert_allclose(QUsym, ref.QU)
def test_quadrupole_nobonds(self, molfrag):
QUaa = (molfrag.QUab + molfrag.dQUab).sum(axis=2).view(full.matrix)
self.assert_allclose(QUaa, ref.QUaa)
def test_Fab(self, molfrag):
Fab = molfrag.Fab
self.assert_allclose(Fab, ref.Fab)
def test_molcas_shift(self, molfrag):
Fab = molfrag.Fab
Lab = Fab + molfrag.sf(Fab)
self.assert_allclose(Lab, ref.Lab)
def test_total_charge_shift(self, molfrag):
dQ = molfrag.dQa[0].sum(axis=0).view(full.matrix)
dQref = [0.0, 0.0, 0.0]
self.assert_allclose(dQref, dQ)
def test_total_charge_shift2(self, molfrag):
d2Q = molfrag.d2Qa[0].sum(axis=0).view(full.matrix)
d2Qref = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
self.assert_allclose(d2Qref, d2Q)
def test_atomic_charge_shift(self, molfrag):
dQa = molfrag.dQa[0]
dQaref = (ref.dQa[:, 1::2] - ref.dQa[:, 2::2]) * (1 / (2 * ref.ff))
self.assert_allclose(dQa, dQaref, atol=0.006)
def test_lagrangian(self, molfrag):
# values per "perturbation" as in atomic_charge_shift above
la = molfrag.la[0]
laref = (ref.la[:, 0:6:2] - ref.la[:, 1:6:2]) * (1 / (2 * ref.ff))
# The sign difference is because Molcas sets up the rhs with opposite sign
self.assert_allclose(-laref, la, atol=100)
def test_bond_charge_shift(self, molfrag):
dQab = molfrag.dQab[0]
noa = molfrag.noa
dQabref = (ref.dQab[:, 1:7:2] - ref.dQab[:, 2:7:2]) * (1 / (2 * ref.ff))
dQabcmp = full.matrix((3, 3))
ab = 0
for a in range(noa):
for b in range(a):
dQabcmp[ab, :] = dQab[a, b, :]
ab += 1
# The sign difference is because Molcas sets up the rhs with opposite sign
self.assert_allclose(-dQabref, dQabcmp, atol=0.006)
def test_bond_charge_shift_sum(self, molfrag):
dQa = molfrag.dQab[0].sum(axis=1).view(full.matrix)
dQaref = molfrag.dQa[0]
self.assert_allclose(dQa, dQaref)
def test_bond_charge_shift_sum2(self, molfrag):
d2Qa = molfrag.d2Qab[0].sum(axis=1).view(full.matrix)
d2Qaref = molfrag.d2Qa[0]
self.assert_allclose(d2Qa, d2Qaref)
def test_polarizability_total(self, molfrag):
Am = molfrag.Am[0]
self.assert_allclose(Am, ref.Am, 0.015)
def test_beta_zxx(self, molfrag):
r = molfrag.x
D2k = molfrag.D2k
z = r[2].unblock()
xx = D2k[("XDIPLEN XDIPLEN ", 0.0, 0.0)].unblock()
bzxx = -z & xx
self.assert_allclose(bzxx, ref.Bm[2, 0], 0.005)
def test_beta_xzx(self, molfrag):
r = molfrag.x
D2k = molfrag.D2k
x = r[0].unblock()
zx = D2k[("ZDIPLEN XDIPLEN ", 0.0, 0.0)].unblock()
bxzx = -x & zx
self.assert_allclose(bxzx, ref.Bm[0, 2], 0.005)
def test_beta_xxz(self, molfrag):
r = molfrag.x
D2k = molfrag.D2k
x = r[0].unblock()
xz = D2k[("XDIPLEN ZDIPLEN ", 0.0, 0.0)].unblock()
bxxz = -x & xz
self.assert_allclose(bxxz, ref.Bm[0, 2], 0.005)
def test_beta_yyz(self, molfrag):
r = molfrag.x
D2k = molfrag.D2k
y = r[1].unblock()
yz = D2k[("YDIPLEN ZDIPLEN ", 0.0, 0.0)].unblock()
byyz = -y & yz
self.assert_allclose(byyz, ref.Bm[1, 4], 0.005)
def test_beta_zyy(self, molfrag):
r = molfrag.x
D2k = molfrag.D2k
z = r[2].unblock()
yy = D2k[("YDIPLEN YDIPLEN ", 0.0, 0.0)].unblock()
bzyy = -z & yy
self.assert_allclose(bzyy, ref.Bm[2, 3], 0.005)
def test_beta_zzz(self, molfrag):
r = molfrag.x
D2k = molfrag.D2k
z = r[2].unblock()
zz = D2k[("ZDIPLEN ZDIPLEN ", 0.0, 0.0)].unblock()
bzzz = -z & zz
self.assert_allclose(bzzz, ref.Bm[2, 5], 0.005)
def test_hyperpolarizability_total(self, molfrag):
Bm = molfrag.Bm[0]
ref.Bm
self.assert_allclose(Bm, ref.Bm, 0.005)
def test_polarizability_allbonds_molcas_internal(self, molfrag):
O = ref.O
H1O = ref.H1O
H1 = ref.H1
H2O = ref.H2O
H2H1 = ref.H2H1
H2 = ref.H2
rMP = ref.rMP
RO, RH1, RH2 = molfrag.R
ROx, ROy, ROz = RO
RH1x, RH1y, RH1z = RH1
RH2x, RH2y, RH2z = RH2
ihff = 1 / (2 * ref.ff)
q, x, y, z = range(4)
dx1, dx2, dy1, dy2, dz1, dz2 = 1, 2, 3, 4, 5, 6
o, h1o, h1, h2o, h2h1, h2 = range(6)
Oxx = ihff * (rMP[x, dx1, o] - rMP[x, dx2, o])
Oyx = (
ihff
* (rMP[y, dx1, o] - rMP[y, dx2, o] + rMP[x, dy1, o] - rMP[x, dy2, o])
/ 2
)
Oyy = ihff * (rMP[y, dy1, o] - rMP[y, dy2, o])
Ozx = (
ihff
* (rMP[z, dx1, o] - rMP[z, dx2, o] + rMP[x, dz1, o] - rMP[x, dz2, o])
/ 2
)
Ozy = (
ihff
* (rMP[z, dy1, o] - rMP[z, dy2, o] + rMP[y, dz1, o] - rMP[y, dz2, o])
/ 2
)
Ozz = ihff * (rMP[z, dz1, o] - rMP[z, dz2, o])
H1Oxx = ihff * (
rMP[x, dx1, h1o]
- rMP[x, dx2, h1o]
- (rMP[q, dx1, h1o] - rMP[q, dx2, h1o]) * (RH1x - ROx)
)
H1Oyx = ihff * (
(rMP[y, dx1, h1o] - rMP[y, dx2, h1o] + rMP[x, dy1, h1o] - rMP[x, dy2, h1o])
/ 2
- (rMP[q, dx1, h1o] - rMP[q, dx2, h1o]) * (RH1y - ROy)
# - (rMP[0, dy1, h1o] - rMP[0, dy2, h1o])*(RH1x-ROx) THIS IS REALLY... A BUG?
)
H1Oyy = ihff * (
rMP[y, dy1, h1o]
- rMP[y, dy2, h1o]
- (rMP[q, dy1, h1o] - rMP[q, dy2, h1o]) * (RH1y - ROy)
)
H1Ozx = ihff * (
(rMP[z, dx1, h1o] - rMP[z, dx2, h1o] + rMP[x, dz1, h1o] - rMP[x, dz2, h1o])
/ 2
- (rMP[q, dx1, h1o] - rMP[q, dx2, h1o]) * (RH1z - ROz)
# - (rMP[q, dz1, h1o] - rMP[q, dz2, h1o])*(RH1x-ROx) #THIS IS REALLY... A BUG?
)
H1Ozy = ihff * (
(rMP[z, dy1, h1o] - rMP[z, dy2, h1o] + rMP[y, dz1, h1o] - rMP[y, dz2, h1o])
/ 2
- (rMP[q, dy1, h1o] - rMP[q, dy2, h1o]) * (RH1z - ROz)
# - (rMP[q, dz1, h1o] - rMP[q, dz2, h1o])*(RH1y-ROy) THIS IS REALLY... A BUG?
)
H1Ozz = ihff * (
rMP[z, dz1, h1o]
- rMP[z, dz2, h1o]
- (rMP[q, dz1, h1o] - rMP[q, dz2, h1o]) * (RH1z - ROz)
)
H1xx = ihff * (rMP[x, dx1, h1] - rMP[x, dx2, h1])
H1yx = (
ihff * (rMP[y, dx1, h1] - rMP[y, dx2, h1])
+ ihff * (rMP[x, dy1, h1] - rMP[x, dy2, h1])
) / 2
H1yy = ihff * (rMP[y, dy1, h1] - rMP[y, dy2, h1])
H1zx = (
ihff * (rMP[z, dx1, h1] - rMP[z, dx2, h1])
+ ihff * (rMP[x, dz1, h1] - rMP[x, dz2, h1])
) / 2
H1zy = (
ihff * (rMP[z, dy1, h1] - rMP[z, dy2, h1])
+ ihff * (rMP[y, dz1, h1] - rMP[y, dz2, h1])
) / 2
H1zz = ihff * (rMP[z, dz1, h1] - rMP[z, dz2, h1])
H2Oxx = ihff * (
rMP[x, dx1, h2o]
- rMP[x, dx2, h2o]
- (rMP[q, dx1, h2o] - rMP[q, dx2, h2o]) * (RH2x - ROx)
)
H2Oyx = ihff * (
(rMP[y, dx1, h2o] - rMP[y, dx2, h2o] + rMP[x, dy1, h2o] - rMP[x, dy2, h2o])
/ 2
- (rMP[q, dx1, h2o] - rMP[q, dx2, h2o]) * (RH2y - ROy)
# - (rMP[q, dy1, h1o] - rMP[q, dy2, h1o])*(RH2x-ROx) THIS IS REALLY... A BUG?
)
H2Oyy = ihff * (
rMP[y, dy1, h2o]
- rMP[y, dy2, h2o]
- (rMP[q, dy1, h2o] - rMP[q, dy2, h2o]) * (RH2y - ROy)
)
H2Ozx = ihff * (
(rMP[z, dx1, h2o] - rMP[z, dx2, h2o] + rMP[x, dz1, h2o] - rMP[x, dz2, h2o])
/ 2
- (rMP[q, dx1, h2o] - rMP[q, dx2, h2o]) * (RH2z - ROz)
# - (rMP[q, dz1, h1o] - rMP[q, dz2, h1o])*(RH2x-ROx) #THIS IS REALLY... A BUG?
)
H2Ozy = ihff * (
(rMP[z, dy1, h2o] - rMP[z, dy2, h2o] + rMP[y, dz1, h2o] - rMP[y, dz2, h2o])
/ 2
- (rMP[q, dy1, h2o] - rMP[q, dy2, h2o]) * (RH2z - ROz)
# - (rMP[q, dz1, h2o] - rMP[q, dz2, h2o])*(RH2y-ROy) THIS IS REALLY... A BUG?
)
H2Ozz = ihff * (
rMP[z, dz1, h2o]
- rMP[z, dz2, h2o]
- (rMP[q, dz1, h2o] - rMP[q, dz2, h2o]) * (RH2z - ROz)
)
H2H1xx = ihff * (
rMP[x, dx1, h2h1]
- rMP[x, dx2, h2h1]
- (rMP[q, dx1, h2h1] - rMP[q, dx2, h2h1]) * (RH2x - RH1x)
)
H2H1yx = ihff * (
(
rMP[y, dx1, h2h1]
- rMP[y, dx2, h2h1]
+ rMP[x, dy1, h2h1]
- rMP[x, dy2, h2h1]
)
/ 2
- (rMP[q, dx1, h2h1] - rMP[q, dx2, h2h1]) * (RH1y - ROy)
# - (rMP[q, dy1, h2h1] - rMP[q, dy2, h2h1])*(RH1x-ROx) THIS IS REALLY... A BUG?
)
H2H1yy = ihff * (
rMP[y, dy1, h2h1]
- rMP[y, dy2, h2h1]
- (rMP[q, dy1, h2h1] - rMP[q, dy2, h2h1]) * (RH2y - RH1y)
)
H2H1zx = ihff * (
(
rMP[z, dx1, h2h1]
- rMP[z, dx2, h2h1]
+ rMP[x, dz1, h2h1]
- rMP[x, dz2, h2h1]
)
/ 2
- (rMP[q, dx1, h2h1] - rMP[q, dx2, h2h1]) * (RH1z - ROz)
# - (rMP[q, dz1, h2h1] - rMP[q, dz2, h2h1])*(RH1x-ROx) #THIS IS REALLY... A BUG?
)
H2H1zy = ihff * (
(
rMP[z, dy1, h2h1]
- rMP[z, dy2, h2h1]
+ rMP[y, dz1, h2h1]
- rMP[y, dz2, h2h1]
)
/ 2
- (rMP[q, dy1, h2h1] - rMP[q, dy2, h2h1]) * (RH1z - ROz)
# - (rMP[q, dz1, h2h1] - rMP[q, dz2, h2h1])*(RH1y-RO[1]) THIS IS REALLY... A BUG?
)
H2H1zz = ihff * (
rMP[z, dz1, h2h1]
- rMP[z, dz2, h2h1]
- (rMP[q, dz1, h2h1] - rMP[q, dz2, h2h1]) * (RH2z - RH1z)
)
H2xx = ihff * (rMP[x, dx1, h2] - rMP[x, dx2, h2])
H2yx = (
ihff * (rMP[y, dx1, h2] - rMP[y, dx2, h2])
+ ihff * (rMP[x, dy1, h2] - rMP[x, dy2, h2])
) / 2
H2yy = ihff * (rMP[y, dy1, h2] - rMP[y, dy2, h2])
H2zx = (
ihff * (rMP[z, dx1, h2] - rMP[z, dx2, h2])
+ ihff * (rMP[x, dz1, h2] - rMP[x, dz2, h2])
) / 2
H2zy = (
ihff * (rMP[z, dy1, h2] - rMP[z, dy2, h2])
+ ihff * (rMP[y, dz1, h2] - rMP[y, dz2, h2])
) / 2
H2zz = ihff * (rMP[z, dz1, h2] - rMP[z, dz2, h2])
self.assert_allclose(O[0], Oxx, text="Oxx")
self.assert_allclose(O[1], Oyx, text="Oyx")
self.assert_allclose(O[2], Oyy, text="Oyy")
self.assert_allclose(O[3], Ozx, text="Ozx")
self.assert_allclose(O[4], Ozy, text="Ozy")
self.assert_allclose(O[5], Ozz, text="Ozz")
self.assert_allclose(H1O[0], H1Oxx, text="H1Oxx")
self.assert_allclose(H1O[1], H1Oyx, text="H1Oyx")
self.assert_allclose(H1O[2], H1Oyy, text="H1Oyy")
self.assert_allclose(H1O[3], H1Ozx, text="H1Ozx")
self.assert_allclose(H1O[4], H1Ozy, text="H1Ozy")
self.assert_allclose(H1O[5], H1Ozz, text="H1Ozz")
self.assert_allclose(H1[0], H1xx, text="H1xx")
self.assert_allclose(H1[1], H1yx, text="H1yx")
self.assert_allclose(H1[2], H1yy, text="H1yy")
self.assert_allclose(H1[3], H1zx, text="H1zx")
self.assert_allclose(H1[4], H1zy, text="H1zy")
self.assert_allclose(H1[5], H1zz, text="H1zz")
self.assert_allclose(H2O[0], H2Oxx, text="H2Oxx")
self.assert_allclose(H2O[1], H2Oyx, text="H2Oyx")
self.assert_allclose(H2O[2], H2Oyy, text="H2Oyy")
self.assert_allclose(H2O[3], H2Ozx, text="H2Ozx")
self.assert_allclose(H2O[4], H2Ozy, text="H2Ozy")
self.assert_allclose(H2O[5], H2Ozz, text="H2Ozz")
self.assert_allclose(H2H1[0], H2H1xx, text="H2H1xx")
self.assert_allclose(H2H1[1], H2H1yx, text="H2H1yx")
self.assert_allclose(H2H1[2], H2H1yy, text="H2H1yy")
self.assert_allclose(H2H1[3], H2H1zx, text="H2H1zx")
self.assert_allclose(H2H1[4], H2H1zy, text="H2H1zy")
self.assert_allclose(H2H1[5], H2H1zz, text="H2H1zz")
self.assert_allclose(H2[0], H2xx, text="H2xx")
self.assert_allclose(H2[1], H2yx, text="H2yx")
self.assert_allclose(H2[2], H2yy, text="H2yy")
self.assert_allclose(H2[3], H2zx, text="H2zx")
self.assert_allclose(H2[4], H2zy, text="H2zy")
self.assert_allclose(H2[5], H2zz, text="H2zz")
def test_altint(self, molfrag):
R = molfrag.R
rMP = ref.rMP
diff = [(1, 2), (3, 4), (5, 6)]
bonds = (1, 3, 4)
ablab = ("O", "H1O", "H1", "H2O", "H2H1", "H2")
ijlab = ("xx", "yx", "yy", "zx", "zy", "zz")
pol = np.zeros((6, molfrag.noa * (molfrag.noa + 1) // 2))
for ab, a, b in pairs(molfrag.noa):
for ij, i, j in pairs(3):
# from pdb import set_trace; set_trace()
i1, i2 = diff[i]
j1, j2 = diff[j]
pol[ij, ab] += (
rMP[i + 1, j1, ab]
- rMP[i + 1, j2, ab]
+ rMP[j + 1, i1, ab]
- rMP[j + 1, i2, ab]
) / (4 * ref.ff)
if ab in bonds:
pol[ij, ab] -= (
(R[a][i] - R[b][i])
* (rMP[0, j1, ab] - rMP[0, j2, ab])
/ (2 * ref.ff)
)
self.assert_allclose(
ref.Aab[ij, ab], pol[ij, ab], text="%s%s" % (ablab[ab], ijlab[ij])
)
def test_polarizability_allbonds_atoms(self, molfrag):
Aab = molfrag.Aab[0] # + molfrag.dAab
noa = molfrag.noa
Acmp = full.matrix(ref.Aab.shape)
ab = 0
for a in range(noa):
for b in range(a):
Acmp[:, ab] = (Aab[:, :, a, b] + Aab[:, :, b, a]).pack()
ab += 1
Acmp[:, ab] = Aab[:, :, a, a].pack()
ab += 1
# atoms
self.assert_allclose(ref.Aab[:, 0], Acmp[:, 0], atol=0.005)
self.assert_allclose(ref.Aab[:, 2], Acmp[:, 2], atol=0.005)
self.assert_allclose(ref.Aab[:, 5], Acmp[:, 5], atol=0.005)
def test_polarizability_allbonds_bonds(self, molfrag):
Aab = molfrag.Aab[0] + molfrag.dAab[0] * .5
noa = molfrag.noa
Acmp = full.matrix(ref.Aab.shape)
ab = 0
for a in range(noa):
for b in range(a):
Acmp[:, ab] = (Aab[:, :, a, b] + Aab[:, :, b, a]).pack()
ab += 1
Acmp[:, ab] = Aab[:, :, a, a].pack()
ab += 1
# atoms
self.assert_allclose(ref.Aab[:, 1], Acmp[:, 1], atol=0.150, err_msg="H1O")
self.assert_allclose(ref.Aab[:, 3], Acmp[:, 3], atol=0.150, err_msg="H2O")
self.assert_allclose(ref.Aab[:, 4], Acmp[:, 4], atol=0.005, err_msg="H2H1")
def test_polarizability_nobonds(self, molfrag):
Aab = molfrag.Aab[0] + molfrag.dAab[0] * .5
noa = molfrag.noa
Acmp = full.matrix((6, noa))
Aa = Aab.sum(axis=3).view(full.matrix)
for a in range(noa):
Acmp[:, a] = Aa[:, :, a].pack()
# atoms
self.assert_allclose(Acmp, ref.Aa, atol=0.07)
def test_potfile_PAn0(self, molfrag):
PAn0 = molfrag.output_potential_file(maxl=-1, pol=0, hyper=0)
self.assert_str(PAn0, ref.PAn0)
def test_potfile_PA00(self, molfrag):
PA00 = molfrag.output_potential_file(maxl=0, pol=0, hyper=0)
self.assert_str(PA00, ref.PA00)
def test_potfile_PA10(self, molfrag):
PA10 = molfrag.output_potential_file(maxl=1, pol=0, hyper=0)
self.assert_str(PA10, ref.PA10)
def test_potfile_PA20(self, molfrag):
PA20 = molfrag.output_potential_file(maxl=2, pol=0, hyper=0)
self.assert_str(PA20, ref.PA20)
def test_potfile_PA21(self, molfrag):
PA21 = molfrag.output_potential_file(maxl=2, pol=1, hyper=0)
self.assert_str(PA21, ref.PA21)
def test_potfile_PA22(self, molfrag):
PA22 = molfrag.output_potential_file(maxl=2, pol=2, hyper=0)
self.assert_str(PA22, ref.PA22)
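# --- Illustrative sketch (not part of the original tests) ---
# Many assertions above rebuild central finite differences of the form
# (P(+f) - P(-f)) / (2 * f), with f = ref.ff the applied field strength.
# The helper below shows the same two-point stencil on an arbitrary scalar
# function; the step size is an assumption chosen only for the example.
def _central_difference(func, x0, step=1.0e-3):
    """Estimate d(func)/dx at x0 with a symmetric two-point stencil."""
    return (func(x0 + step) - func(x0 - step)) / (2.0 * step)
# e.g. _central_difference(np.sin, 0.0) is approximately np.cos(0.0) == 1.0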
|
vahtras/loprop
|
tests/test_h2o_beta_trans.py
|
Python
|
gpl-3.0
| 19,452
|
[
"Dalton"
] |
301b30964c8e8e8d9456953b16b2c21ce1c17750512c08684bd104661f4960aa
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMulttest(RPackage):
"""Resampling-based multiple hypothesis testing.
Non-parametric bootstrap and permutation resampling-based multiple
testing procedures (including empirical Bayes methods) for controlling
the family-wise error rate (FWER), generalized family-wise error rate
(gFWER), tail probability of the proportion of false positives (TPPFP),
and false discovery rate (FDR). Several choices of bootstrap-based null
distribution are implemented (centered, centered and scaled, quantile-
transformed). Single-step and step-wise methods are available. Tests
based on a variety of t- and F-statistics (including t-statistics based
on regression parameters from linear and survival models as well as
those based on correlation parameters) are included. When probing
hypotheses with t-statistics, users may also select a potentially faster
null distribution which is multivariate normal with mean zero and
variance covariance matrix derived from the vector influence function.
Results are reported in terms of adjusted p-values, confidence regions
and test statistic cutoffs. The procedures are directly applicable to
identifying differentially expressed genes in DNA microarray
experiments."""
homepage = "https://bioconductor.org/packages/multtest"
git = "https://git.bioconductor.org/packages/multtest.git"
version('2.40.0', commit='5f00017c2d3a31e05e1cfe06d9f7afdee19f8473')
version('2.38.0', commit='4dfe71cecfb298a94521088fb7bd83c5498d2915')
version('2.36.0', commit='babb15e8d110eb72300ad59cf7e53386237a4198')
version('2.34.0', commit='6ef873e05e6c93ede54f3421424f56eda057cd54')
version('2.32.0', commit='c5e890dfbffcc3a3f107303a24b6085614312f4a')
depends_on('r@2.10:', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-survival', type=('build', 'run'))
depends_on('r-mass', type=('build', 'run'))
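# --- Illustrative sketch (not part of the original recipe) ---
# A minimal, hedged example of the same Spack recipe pattern: a hypothetical
# Bioconductor R package pinned to one commit with a single R dependency.
# The class name, URLs, version and commit hash are placeholders only.
class RExamplepkg(RPackage):
    """Hypothetical recipe used only to illustrate the structure above."""

    homepage = "https://bioconductor.org/packages/examplepkg"
    git = "https://git.bioconductor.org/packages/examplepkg.git"

    version('1.0.0', commit='0000000000000000000000000000000000000000')

    depends_on('r@3.5:', type=('build', 'run'))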
|
rspavel/spack
|
var/spack/repos/builtin/packages/r-multtest/package.py
|
Python
|
lgpl-2.1
| 2,298
|
[
"Bioconductor"
] |
a86dfad2bf3b3242c65f8d819210f568d9e6d16253a8244bcdc2e35f6e6f7a5b
|
# codegen.py
# Copyright (C) 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides functionality for rendering a parsetree construct into module source code."""
import time
import re
from mako.pygen import PythonPrinter
from mako import util, ast, parsetree, filters
MAGIC_NUMBER = 4
def compile(node, uri, filename=None, default_filters=None, buffer_filters=None, imports=None, source_encoding=None, generate_unicode=True):
"""generate module source code given a parsetree node, uri, and optional source filename"""
buf = util.FastEncodingBuffer(unicode=generate_unicode)
printer = PythonPrinter(buf)
_GenerateRenderMethod(printer, _CompileContext(uri, filename, default_filters, buffer_filters, imports, source_encoding, generate_unicode), node)
return buf.getvalue()
class _CompileContext(object):
def __init__(self, uri, filename, default_filters, buffer_filters, imports, source_encoding, generate_unicode):
self.uri = uri
self.filename = filename
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.source_encoding = source_encoding
self.generate_unicode = generate_unicode
class _GenerateRenderMethod(object):
"""a template visitor object which generates the full module source for a template."""
def __init__(self, printer, compiler, node):
self.printer = printer
self.last_source_line = -1
self.compiler = compiler
self.node = node
self.identifier_stack = [None]
self.in_def = isinstance(node, parsetree.DefTag)
if self.in_def:
name = "render_" + node.name
args = node.function_decl.get_argument_expressions()
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
defs = None
pagetag = None
else:
defs = self.write_toplevel()
pagetag = self.compiler.pagetag
name = "render_body"
if pagetag is not None:
args = pagetag.body_decl.get_argument_expressions()
if not pagetag.body_decl.kwargs:
args += ['**pageargs']
cached = eval(pagetag.attributes.get('cached', 'False'))
else:
args = ['**pageargs']
cached = False
buffered = filtered = False
if args is None:
args = ['context']
else:
args = [a for a in ['context'] + args]
self.write_render_callable(pagetag or node, name, args, buffered, filtered, cached)
if defs is not None:
for node in defs:
_GenerateRenderMethod(printer, compiler, node)
identifiers = property(lambda self:self.identifier_stack[-1])
def write_toplevel(self):
"""traverse a template structure for module-level directives and generate the
start of module-level code."""
inherit = []
namespaces = {}
module_code = []
encoding = [None]
self.compiler.pagetag = None
class FindTopLevel(object):
def visitInheritTag(s, node):
inherit.append(node)
def visitNamespaceTag(s, node):
namespaces[node.name] = node
def visitPageTag(s, node):
self.compiler.pagetag = node
def visitCode(s, node):
if node.ismodule:
module_code.append(node)
f = FindTopLevel()
for n in self.node.nodes:
n.accept_visitor(f)
self.compiler.namespaces = namespaces
module_ident = util.Set()
for n in module_code:
module_ident = module_ident.union(n.declared_identifiers())
module_identifiers = _Identifiers()
module_identifiers.declared = module_ident
# module-level names, python code
if not self.compiler.generate_unicode and self.compiler.source_encoding:
self.printer.writeline("# -*- encoding:%s -*-" % self.compiler.source_encoding)
self.printer.writeline("from mako import runtime, filters, cache")
self.printer.writeline("UNDEFINED = runtime.UNDEFINED")
self.printer.writeline("__M_dict_builtin = dict")
self.printer.writeline("__M_locals_builtin = locals")
self.printer.writeline("_magic_number = %s" % repr(MAGIC_NUMBER))
self.printer.writeline("_modified_time = %s" % repr(time.time()))
self.printer.writeline("_template_filename=%s" % repr(self.compiler.filename))
self.printer.writeline("_template_uri=%s" % repr(self.compiler.uri))
self.printer.writeline("_template_cache=cache.Cache(__name__, _modified_time)")
self.printer.writeline("_source_encoding=%s" % repr(self.compiler.source_encoding))
if self.compiler.imports:
buf = ''
for imp in self.compiler.imports:
buf += imp + "\n"
self.printer.writeline(imp)
impcode = ast.PythonCode(buf, source='', lineno=0, pos=0, filename='template defined imports')
else:
impcode = None
main_identifiers = module_identifiers.branch(self.node)
module_identifiers.topleveldefs = module_identifiers.topleveldefs.union(main_identifiers.topleveldefs)
[module_identifiers.declared.add(x) for x in ["UNDEFINED"]]
if impcode:
[module_identifiers.declared.add(x) for x in impcode.declared_identifiers]
self.compiler.identifiers = module_identifiers
self.printer.writeline("_exports = %s" % repr([n.name for n in main_identifiers.topleveldefs.values()]))
self.printer.write("\n\n")
if len(module_code):
self.write_module_code(module_code)
if len(inherit):
self.write_namespaces(namespaces)
self.write_inherit(inherit[-1])
elif len(namespaces):
self.write_namespaces(namespaces)
return main_identifiers.topleveldefs.values()
def write_render_callable(self, node, name, args, buffered, filtered, cached):
"""write a top-level render callable.
this could be the main render() method or that of a top-level def."""
self.printer.writelines(
"def %s(%s):" % (name, ','.join(args)),
"context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writeline("context._push_buffer()")
self.identifier_stack.append(self.compiler.identifiers.branch(self.node))
if not self.in_def and '**pageargs' in args:
self.identifier_stack[-1].argument_declared.add('pageargs')
if not self.in_def and (len(self.identifiers.locally_assigned) > 0 or len(self.identifiers.argument_declared)>0):
self.printer.writeline("__M_locals = __M_dict_builtin(%s)" % ','.join(["%s=%s" % (x, x) for x in self.identifiers.argument_declared]))
self.write_variable_declares(self.identifiers, toplevel=True)
for n in self.node.nodes:
n.accept_visitor(self)
self.write_def_finish(self.node, buffered, filtered, cached)
self.printer.writeline(None)
self.printer.write("\n\n")
if cached:
self.write_cache_decorator(node, name, args, buffered, self.identifiers, toplevel=True)
def write_module_code(self, module_code):
"""write module-level template code, i.e. that which is enclosed in <%! %> tags
in the template."""
for n in module_code:
self.write_source_comment(n)
self.printer.write_indented_block(n.text)
def write_inherit(self, node):
"""write the module-level inheritance-determination callable."""
self.printer.writelines(
"def _mako_inherit(template, context):",
"_mako_generate_namespaces(context)",
"return runtime._inherit_from(context, %s, _template_uri)" % (node.parsed_attributes['file']),
None
)
def write_namespaces(self, namespaces):
"""write the module-level namespace-generating callable."""
self.printer.writelines(
"def _mako_get_namespace(context, name):",
"try:",
"return context.namespaces[(__name__, name)]",
"except KeyError:",
"_mako_generate_namespaces(context)",
"return context.namespaces[(__name__, name)]",
None,None
)
self.printer.writeline("def _mako_generate_namespaces(context):")
for node in namespaces.values():
if node.attributes.has_key('import'):
self.compiler.has_ns_imports = True
self.write_source_comment(node)
if len(node.nodes):
self.printer.writeline("def make_namespace():")
export = []
identifiers = self.compiler.identifiers.branch(node)
class NSDefVisitor(object):
def visitDefTag(s, node):
self.write_inline_def(node, identifiers, nested=False)
export.append(node.name)
vis = NSDefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.printer.writeline("return [%s]" % (','.join(export)))
self.printer.writeline(None)
callable_name = "make_namespace()"
else:
callable_name = "None"
self.printer.writeline("ns = runtime.Namespace(%s, context._clean_inheritance_tokens(), templateuri=%s, callables=%s, calling_uri=_template_uri, module=%s)" % (repr(node.name), node.parsed_attributes.get('file', 'None'), callable_name, node.parsed_attributes.get('module', 'None')))
if eval(node.attributes.get('inheritable', "False")):
self.printer.writeline("context['self'].%s = ns" % (node.name))
self.printer.writeline("context.namespaces[(__name__, %s)] = ns" % repr(node.name))
self.printer.write("\n")
if not len(namespaces):
self.printer.writeline("pass")
self.printer.writeline(None)
def write_variable_declares(self, identifiers, toplevel=False, limit=None):
"""write variable declarations at the top of a function.
the variable declarations are in the form of callable definitions for defs and/or
name lookup within the function's context argument. the names declared are based on the
names that are referenced in the function body, which don't otherwise have any explicit
assignment operation. names that are assigned within the body are assumed to be
locally-scoped variables and are not separately declared.
for def callable definitions, if the def is a top-level callable then a
'stub' callable is generated which wraps the current Context into a closure. if the def
is not top-level, it is fully rendered as a local closure."""
# collection of all defs available to us in this scope
comp_idents = dict([(c.name, c) for c in identifiers.defs])
to_write = util.Set()
# write "context.get()" for all variables we are going to need that aren't in the namespace yet
to_write = to_write.union(identifiers.undeclared)
# write closure functions for closures that we define right here
to_write = to_write.union(util.Set([c.name for c in identifiers.closuredefs.values()]))
# remove identifiers that are declared in the argument signature of the callable
to_write = to_write.difference(identifiers.argument_declared)
# remove identifiers that we are going to assign to. in this way we mimic Python's behavior,
# i.e. assignment to a variable within a block means that variable is now a "locally declared" var,
# which cannot be referenced beforehand.
to_write = to_write.difference(identifiers.locally_declared)
# if a limiting set was sent, constraint to those items in that list
# (this is used for the caching decorator)
if limit is not None:
to_write = to_write.intersection(limit)
if toplevel and getattr(self.compiler, 'has_ns_imports', False):
self.printer.writeline("_import_ns = {}")
self.compiler.has_imports = True
for ident, ns in self.compiler.namespaces.iteritems():
if ns.attributes.has_key('import'):
self.printer.writeline("_mako_get_namespace(context, %s)._populate(_import_ns, %s)" % (repr(ident), repr(re.split(r'\s*,\s*', ns.attributes['import']))))
for ident in to_write:
if ident in comp_idents:
comp = comp_idents[ident]
if comp.is_root():
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
elif ident in self.compiler.namespaces:
self.printer.writeline("%s = _mako_get_namespace(context, %s)" % (ident, repr(ident)))
else:
if getattr(self.compiler, 'has_ns_imports', False):
self.printer.writeline("%s = _import_ns.get(%s, context.get(%s, UNDEFINED))" % (ident, repr(ident), repr(ident)))
else:
self.printer.writeline("%s = context.get(%s, UNDEFINED)" % (ident, repr(ident)))
self.printer.writeline("__M_writer = context.writer()")
def write_source_comment(self, node):
"""write a source comment containing the line number of the corresponding template line."""
if self.last_source_line != node.lineno:
self.printer.writeline("# SOURCE LINE %d" % node.lineno)
self.last_source_line = node.lineno
def write_def_decl(self, node, identifiers):
"""write a locally-available callable referencing a top-level def"""
funcname = node.function_decl.funcname
namedecls = node.function_decl.get_argument_expressions()
nameargs = node.function_decl.get_argument_expressions(include_defaults=False)
if not self.in_def and (len(self.identifiers.locally_assigned) > 0 or len(self.identifiers.argument_declared) > 0):
nameargs.insert(0, 'context.locals_(__M_locals)')
else:
nameargs.insert(0, 'context')
self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls)))
self.printer.writeline("return render_%s(%s)" % (funcname, ",".join(nameargs)))
self.printer.writeline(None)
def write_inline_def(self, node, identifiers, nested):
"""write a locally-available def callable inside an enclosing def."""
namedecls = node.function_decl.get_argument_expressions()
self.printer.writeline("def %s(%s):" % (node.name, ",".join(namedecls)))
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
self.printer.writelines(
"context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writelines(
"context._push_buffer()",
)
identifiers = identifiers.branch(node, nested=nested)
self.write_variable_declares(identifiers)
self.identifier_stack.append(identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, filtered, cached)
self.printer.writeline(None)
if cached:
self.write_cache_decorator(node, node.name, namedecls, False, identifiers, inline=True, toplevel=False)
def write_def_finish(self, node, buffered, filtered, cached, callstack=True):
"""write the end section of a rendering function, either outermost or inline.
this takes into account if the rendering function was filtered, buffered, etc.
and closes the corresponding try: block if any, and writes code to retrieve captured content,
apply filters, send proper return value."""
if not buffered and not cached and not filtered:
self.printer.writeline("return ''")
if callstack:
self.printer.writelines(
"finally:",
"context.caller_stack._pop_frame()",
None
)
if buffered or filtered or cached:
if buffered or cached:
# in a caching scenario, don't try to get a writer
# from the context after popping; assume the caching
# implementation might be using a context with no
# extra buffers
self.printer.writelines(
"finally:",
"__M_buf = context._pop_buffer()"
)
else:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()"
)
if callstack:
self.printer.writeline("context.caller_stack._pop_frame()")
s = "__M_buf.getvalue()"
if filtered:
s = self.create_filter_callable(node.filter_args.args, s, False)
self.printer.writeline(None)
if buffered and not cached:
s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
if buffered or cached:
self.printer.writeline("return %s" % s)
else:
self.printer.writelines(
"__M_writer(%s)" % s,
"return ''"
)
def write_cache_decorator(self, node_or_pagetag, name, args, buffered, identifiers, inline=False, toplevel=False):
"""write a post-function decorator to replace a rendering callable with a cached version of itself."""
self.printer.writeline("__M_%s = %s" % (name, name))
cachekey = node_or_pagetag.parsed_attributes.get('cache_key', repr(name))
cacheargs = {}
for arg in (('cache_type', 'type'), ('cache_dir', 'data_dir'), ('cache_timeout', 'expiretime'), ('cache_url', 'url')):
val = node_or_pagetag.parsed_attributes.get(arg[0], None)
if val is not None:
if arg[1] == 'expiretime':
cacheargs[arg[1]] = int(eval(val))
else:
cacheargs[arg[1]] = val
else:
if self.compiler.pagetag is not None:
val = self.compiler.pagetag.parsed_attributes.get(arg[0], None)
if val is not None:
if arg[1] == 'expiretime':
cacheargs[arg[1]] = int(eval(val))
else:
cacheargs[arg[1]] = val
self.printer.writeline("def %s(%s):" % (name, ','.join(args)))
# form "arg1, arg2, arg3=arg3, arg4=arg4", etc.
pass_args = [ '=' in a and "%s=%s" % ((a.split('=')[0],)*2) or a for a in args]
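# e.g. (illustrative, added): args ['x', 'y=3'] become pass_args ['x', 'y=y'],
# so defaulted arguments are forwarded by name to the uncached __M_<name> callable.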
self.write_variable_declares(identifiers, toplevel=toplevel, limit=node_or_pagetag.undeclared_identifiers())
if buffered:
s = "context.get('local').get_cached(%s, %screatefunc=lambda:__M_%s(%s))" % (cachekey, ''.join(["%s=%s, " % (k,v) for k, v in cacheargs.iteritems()]), name, ','.join(pass_args))
# apply buffer_filters
s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
self.printer.writelines("return " + s,None)
else:
self.printer.writelines(
"__M_writer(context.get('local').get_cached(%s, %screatefunc=lambda:__M_%s(%s)))" % (cachekey, ''.join(["%s=%s, " % (k,v) for k, v in cacheargs.iteritems()]), name, ','.join(pass_args)),
"return ''",
None
)
def create_filter_callable(self, args, target, is_expression):
"""write a filter-applying expression based on the filters present in the given
filter names, adjusting for the global 'default' filter aliases as needed."""
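# sketch (added, not in the original): with args ['h', 'myfilter'] and target 'x',
# and assuming 'h' resolves to 'filters.html_escape' via DEFAULT_ESCAPES, the returned
# expression is myfilter(filters.html_escape(x)) -- filters wrap the target
# innermost-first, in list order.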
def locate_encode(name):
if re.match(r'decode\..+', name):
return "filters." + name
else:
return filters.DEFAULT_ESCAPES.get(name, name)
if 'n' not in args:
if is_expression:
if self.compiler.pagetag:
args = self.compiler.pagetag.filter_args.args + args
if self.compiler.default_filters:
args = self.compiler.default_filters + args
for e in args:
# if filter given as a function, get just the identifier portion
if e == 'n':
continue
m = re.match(r'(.+?)(\(.*\))', e)
if m:
(ident, fargs) = m.group(1,2)
f = locate_encode(ident)
e = f + fargs
else:
x = e
e = locate_encode(e)
assert e is not None
target = "%s(%s)" % (e, target)
return target
def visitExpression(self, node):
self.write_source_comment(node)
if len(node.escapes) or (self.compiler.pagetag is not None and len(self.compiler.pagetag.filter_args.args)) or len(self.compiler.default_filters):
s = self.create_filter_callable(node.escapes_code.args, "%s" % node.text, True)
self.printer.writeline("__M_writer(%s)" % s)
else:
self.printer.writeline("__M_writer(%s)" % node.text)
def visitControlLine(self, node):
if node.isend:
self.printer.writeline(None)
else:
self.write_source_comment(node)
self.printer.writeline(node.text)
def visitText(self, node):
self.write_source_comment(node)
self.printer.writeline("__M_writer(%s)" % repr(node.content))
def visitTextTag(self, node):
filtered = len(node.filter_args.args) > 0
if filtered:
self.printer.writelines(
"__M_writer = context._push_writer()",
"try:",
)
for n in node.nodes:
n.accept_visitor(self)
if filtered:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()",
"__M_writer(%s)" % self.create_filter_callable(node.filter_args.args, "__M_buf.getvalue()", False),
None
)
def visitCode(self, node):
if not node.ismodule:
self.write_source_comment(node)
self.printer.write_indented_block(node.text)
if not self.in_def and len(self.identifiers.locally_assigned) > 0:
# if we are the "template" def, fudge locally declared/modified variables into the "__M_locals" dictionary,
# which is used for def calls within the same template, to simulate "enclosing scope"
self.printer.writeline('__M_locals.update(__M_dict_builtin([(__M_key, __M_locals_builtin()[__M_key]) for __M_key in [%s] if __M_key in __M_locals_builtin()]))' % ','.join([repr(x) for x in node.declared_identifiers()]))
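# (illustrative, added) for declared names 'x' and 'y' the emitted line is roughly:
# __M_locals.update(__M_dict_builtin([(__M_key, __M_locals_builtin()[__M_key]) for __M_key in ['x','y'] if __M_key in __M_locals_builtin()]))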
def visitIncludeTag(self, node):
self.write_source_comment(node)
args = node.attributes.get('args')
if args:
self.printer.writeline("runtime._include_file(context, %s, _template_uri, %s)" % (node.parsed_attributes['file'], args))
else:
self.printer.writeline("runtime._include_file(context, %s, _template_uri)" % (node.parsed_attributes['file']))
def visitNamespaceTag(self, node):
pass
def visitDefTag(self, node):
pass
def visitCallTag(self, node):
self.printer.writeline("def ccall(caller):")
export = ['body']
callable_identifiers = self.identifiers.branch(node, nested=True)
body_identifiers = callable_identifiers.branch(node, nested=False)
# we want the 'caller' passed to ccall to be used for the body() function,
# but for other non-body() <%def>s within <%call> we want the current caller off the call stack (if any)
body_identifiers.add_declared('caller')
self.identifier_stack.append(body_identifiers)
class DefVisitor(object):
def visitDefTag(s, node):
self.write_inline_def(node, callable_identifiers, nested=False)
export.append(node.name)
# remove defs that are within the <%call> from the "closuredefs" defined
# in the body, so they dont render twice
if node.name in body_identifiers.closuredefs:
del body_identifiers.closuredefs[node.name]
vis = DefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.identifier_stack.pop()
bodyargs = node.body_decl.get_argument_expressions()
self.printer.writeline("def body(%s):" % ','.join(bodyargs))
# TODO: figure out best way to specify buffering/nonbuffering (at call time would be better)
buffered = False
if buffered:
self.printer.writelines(
"context._push_buffer()",
"try:"
)
self.write_variable_declares(body_identifiers)
self.identifier_stack.append(body_identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, False, False, callstack=False)
self.printer.writelines(
None,
"return [%s]" % (','.join(export)),
None
)
self.printer.writelines(
# get local reference to current caller, if any
"caller = context.caller_stack._get_caller()",
# push on caller for nested call
"context.caller_stack.nextcaller = runtime.Namespace('caller', context, callables=ccall(caller))",
"try:")
self.write_source_comment(node)
self.printer.writelines(
"__M_writer(%s)" % self.create_filter_callable([], node.attributes['expr'], True),
"finally:",
"context.caller_stack.nextcaller = None",
None
)
class _Identifiers(object):
"""tracks the status of identifier names as template code is rendered."""
def __init__(self, node=None, parent=None, nested=False):
if parent is not None:
# things that have already been declared in an enclosing namespace (i.e. names we can just use)
self.declared = util.Set(parent.declared).union([c.name for c in parent.closuredefs.values()]).union(parent.locally_declared).union(parent.argument_declared)
# if these identifiers correspond to a "nested" scope, it means whatever the
# parent identifiers had as undeclared will have been declared by that parent,
# and therefore we have them in our scope.
if nested:
self.declared = self.declared.union(parent.undeclared)
# top level defs that are available
self.topleveldefs = util.SetLikeDict(**parent.topleveldefs)
else:
self.declared = util.Set()
self.topleveldefs = util.SetLikeDict()
# things within this level that are referenced before they are declared (e.g. assigned to)
self.undeclared = util.Set()
# things that are declared locally. some of these things could be in the "undeclared"
# list as well if they are referenced before declared
self.locally_declared = util.Set()
# assignments made in explicit python blocks. these will be propagated to
# the context of local def calls.
self.locally_assigned = util.Set()
# things that are declared in the argument signature of the def callable
self.argument_declared = util.Set()
# closure defs that are defined in this level
self.closuredefs = util.SetLikeDict()
self.node = node
if node is not None:
node.accept_visitor(self)
def branch(self, node, **kwargs):
"""create a new Identifiers for a new Node, with this Identifiers as the parent."""
return _Identifiers(node, self, **kwargs)
defs = property(lambda self:util.Set(self.topleveldefs.union(self.closuredefs).values()))
def __repr__(self):
return "Identifiers(declared=%s, locally_declared=%s, undeclared=%s, topleveldefs=%s, closuredefs=%s, argument_declared=%s)" % (repr(list(self.declared)), repr(list(self.locally_declared)), repr(list(self.undeclared)), repr([c.name for c in self.topleveldefs.values()]), repr([c.name for c in self.closuredefs.values()]), repr(self.argument_declared))
def check_declared(self, node):
"""update the state of this Identifiers with the undeclared and declared identifiers of the given node."""
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.locally_declared.add(ident)
def add_declared(self, ident):
self.declared.add(ident)
if ident in self.undeclared:
self.undeclared.remove(ident)
def visitExpression(self, node):
self.check_declared(node)
def visitControlLine(self, node):
self.check_declared(node)
def visitCode(self, node):
if not node.ismodule:
self.check_declared(node)
self.locally_assigned = self.locally_assigned.union(node.declared_identifiers())
def visitDefTag(self, node):
if node.is_root():
self.topleveldefs[node.name] = node
elif node is not self.node:
self.closuredefs[node.name] = node
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
# visit defs only one level deep
if node is self.node:
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
def visitIncludeTag(self, node):
self.check_declared(node)
def visitPageTag(self, node):
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
self.check_declared(node)
def visitCallTag(self, node):
if node is self.node:
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
else:
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
| gabriel/shrub | lib/mako/codegen.py | Python | mit | 32,514 | ["VisIt"] | 2780dd04068fc3bcc62f62e76dd2e3e05d36f6e69750062bc74ac8cb1fe22f83 |
#Standard dependencies
import os
import sys
import inspect
import time
import cPickle as pickle
import re
from copy import copy
from string import Template
#Non-standard dependencies
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as spline
import matplotlib as mpl
mpl.use('Agg')
import pylab as plt
import matplotlib.transforms as mtransforms
from matplotlib.mlab import griddata as mlab_griddata
def griddata(*args, **kwargs):
"""Wrapper function to avoid annoying griddata errors"""
try:
return mlab_griddata(*args, **kwargs)
except RuntimeError:
kwargs['interp'] = 'linear'
return mlab_griddata(*args, **kwargs)
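# usage sketch (added, illustrative): zi = griddata(x, y, z, xi, yi) behaves like
# matplotlib.mlab.griddata but falls back to interp='linear' when the default
# natural-neighbour interpolation raises a RuntimeError (e.g. natgrid not installed).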
import mpmath as mp
from ase.atoms import string2symbols
from ase.thermochemistry import IdealGasThermo, HarmonicThermo
from ase.structure import molecule
from catmap.model import ReactionModel
import data
__version__ = "0.2.270"
def load(setup_file):
rxm = ReactionModel(setup_file = setup_file)
return rxm
modified = []
class ReactionModelWrapper:
def __getattribute__(self,attr):
"Force use of custom getattr"
return self.__getattr__(self,attr)
def __getattr__(self,attr):
"Return the value from the reaction model instance if it's there. Otherwise return the instance's own value (or None if the instance does not have the attribute defined and the attribute is not private)."
if attr == '_rxm':
return object.__getattribute__(self,attr)
elif hasattr(self._rxm,attr):
return getattr(self._rxm,attr)
else:
if attr in self.__dict__:
val = object.__getattribute__(self,attr)
del self.__dict__[attr]
#this makes sure that the attr is read from _rxm
setattr(self._rxm,attr,val)
return val
elif attr.startswith('_'):
raise AttributeError()
else:
return None
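# resolution order (descriptive comment, added): lookups are answered by the wrapped
# ReactionModel (_rxm) when possible; values found only on the wrapper are migrated
# onto _rxm; unknown private names raise AttributeError, other unknown names return None.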
def __setattr__(self,attr,val):
"Set attribute for the instance as well as the reaction_model instance"
accumulate = ['_required','_log_strings','_function_strings']
if attr == '_rxm':
self.__dict__[attr] = val
elif attr in accumulate:
self._rxm.__dict__[attr].update(val)
else:
setattr(self._rxm,attr,val)
| ajmedford/catmap | catmap/__init__.py | Python | gpl-3.0 | 2,369 | ["ASE"] | 6767de79b2ec4670502cd44f26c264cac86e7fd4ec92cc62a89a3adb6f6f2f76 |
# * fb.py main module for pythons fbpy package. Draws stuff in the
# * Linux framebuffer.
# * Copyright (C) 2014 Marcell Marosvolgyi aka noisegate
# *
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# *
# version: 0.1
# profanities included 4 xtra power
import fblib
import svg
import numpy as np
from itertools import count
import os.path
import copy
"""
Some module documentation....
.. doctest::
>>> print "hw!"
hw!
"""
class Uniton(type):
"""
The Uniton is a special case of the Vulgion
and ensures inheritance of certain properties of
the primordial instance for all consecutive instances.
"""
#__instance = None
def __init__(self, *args, **kwargs):
self.__instance = None
super(Uniton, self).__init__(*args, **kwargs)
def __call__(cls, *args, **kwargs):
if cls.__instance is None:
cls.__instance = super(Uniton, cls).__call__(*args, **kwargs)
cls.__instance._setupfb()
cls.__instance.origo = (0,0)
cls.__instance.size = (cls.scr_width, cls.scr_height)
cls.__instance.pixelstyle = Pixelstyle()
else:
cls.__instance = super(Uniton, cls).__call__(*args, **kwargs)
cls.instances.append(cls.__instance)
return cls.__instance
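# behaviour note (added comment): the first Surface() call opens the framebuffer and
# spans the whole screen; later calls go through the else branch, create ordinary
# (sub-)surfaces and are tracked in cls.instances.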
class Bounds(object):
def __init__(self, function):
self.function = function
def adjust(self, x, maxi):
#if float then assume scaled
#and scale it:
if (type(x) == float):
#if x<0 :x=0
#if x>1 :x=1
x = x * (maxi-1)
return x % maxi
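# examples (illustrative, added): adjust(0.5, 200) -> 99.5 (fractions are scaled to
# pixel coordinates); adjust(250, 200) -> 50 (integer coordinates wrap modulo the size).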
def __call__(self, *args, **kwargs):
origo = args[0].origo
size = args[0].size
pixelstyle = args[0].pixelstyle
maxx=size[0]
maxy=size[1]
#should let the driver know about the winsize
#args[0].winsize = (origo[0],origo[1],size[0],size[1], args[0].pixelstyle)
args[0].informdriver()
new_args = []
firstarray = True
for i, X in enumerate(args):
if isinstance(X, tuple):
x = self.adjust(X[0],maxx)
y = self.adjust(X[1],maxy)
#x = x + origo[0]
#y = y + origo[1]
new_args.append((int(x),int(y)))
elif isinstance(X, np.ndarray):
X = np.where(X>=0,X,0)
X = np.where(X<=1,X,1)
if firstarray:
X *= (maxx-1)
#X += origo[0]
firstarray = False
else:
X *= (maxy-1)
#X += origo[1]
X = X.astype(np.int32)
new_args.append(X)
else:
new_args.append(X)
args = tuple(new_args)
return self.function(*args, **kwargs)
def __get__(self, instance, owner):
def wrapper(*args, **kwargs):
return self(instance, *args, **kwargs)
wrapper.__doc__ = self.function.__doc__
wrapper.__name__ = self.function.__name__
return wrapper
class Color(object):
def __init__(self, *args):
if args:
self.r = args[0]
self.g = args[1]
self.b = args[2]
self.a = args[3]
else:
self.r=0
self.g=0
self.b=0
self.a=0
def __repr__(self):
return "Red={0}, Green={1}, Blue={2}, Alpha={3}".format(self.r,self.g,self.b,self.a)
@property
def red(self):
return self.r
@red.setter
def red(self, red):
self.r = red
@property
def green(self):
return self.g
@green.setter
def green(self, green):
self.g = green
@property
def blue(self):
return self.b
@blue.setter
def blue(self, blue):
self.b = blue
@property
def alpha(self):
return self.a
@alpha.setter
def alpha(self, alpha):
self.a = alpha
class Pixelstyle(object):
def __init__(self, *args, **kwargs):
self.color = Colors.white
self.style = Styles.solid
self.blur = 0
self.blurradius = 1
self.sigma = 1
class Geom(object):
def __init__(self, selfy, *args):
self.args = args
self.parent = selfy
def keep(self):
self.parent.objects.append(self)
def redraw(self):
pass
class Rect(Geom):
def redraw(self):
self.parent.rect(*self.args)
class Line(Geom):
def redraw(self):
self.parent.line(*self.args)
class Point(Geom):
def redraw(self):
self.parent.point(*self.args)
class Coordinate(object):
def __init__(self,x, y):
self.x = x
self.y = y
def postprocess(self):
pass
def docme():
return "Surface object of the fbpy module."
class Colors(object):
"""
Some prefab colors, to make life easier.
Food for Pixelstyle. e.g.:
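(illustrative example, added; not in the original docstring)
ps = Pixelstyle()
ps.color = Colors.darkgreen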
"""
#TODO: make immutable !!! else ALL
#colors change throughout FOOL I WAS
black = Color(0,0,0,0)
white = Color(255,255,255,0)
grey = Color(100,100,100,0)
darkgrey = Color(30,30,30,0)
green = Color(0,255,0,0)
darkgreen = Color(0,100,0,0)
magenta = Color(0,170,170,0)
class Styles(object):
solid = 0
dotted = 2
dashed = 1
class Pixelstyles(object):
#should make this immutable
faint = Pixelstyle()
faint.blur = 2
faint.blurradius = 2
faint.style = Styles.solid
faint.sigma = 1
faint.color = Color(60,60,130,100)
sharp = Pixelstyle()
sharp.blur =0
sharp.style = Styles.solid
sharp.blurradius = 1
sharp.sigma =1
sharp.color = Colors.white
class Polys(object):
"""
Multi poly class. Each surface has an instance
of these.
draw3dpolys method of surface will use it.
"""
def __init__(self):
self.x = []
self.y = []
self.z = []
def getthem(self):
l = len(self.x)
for i in range(l):
yield self.x[i], self.y[i], self.z[i]
class Trafo(object):
"""
Handle two dim lintrafos for
your surface.
that is: Stretch and or Rotate
yih.
Work-flow.
You start with making an instance:
.. code-block:: python
T = Trafo()
Upon instantiation you get a unity
transform by default.
Then decide what should happen to it.. E.g.
you want to rotate and then stretch it.
Well, you'll define two Operators:
.. code-block:: python
R = Trafo()
S = Trafo()
R.rotate(0.1) #where 0.1 is the angle in RAD
S.stretch(1.05, 1.05) #ehhhr, 5% in horiz and vert
Now you can iterate:
.. code-block:: python
T *=R
T *=S
Each surface has a built in trafo fb.Surface.trafo, which is
unity or identity by default. The state of this operator is
passed to the fb driver.
Here is a full example:
.. doctest::
>>> import fbpy.fb as fb
>>> main = fb.Surface()
>>> sub = fb.Surface((100,100),(200,200))
>>> R = fb.Trafo()
>>> R.rotate(0.1)
>>> sub.clear()
0
>>> for i in range(10):
... sub.trafo*=R
... sub.rect((10,10),(190,190))
0
0
0
0
0
0
0
0
0
0
>>> sub.grabsilent("./source/images/rotate.png")
0
.. image:: ./images/rotate.png
.. code-block:: python
sub.trafo.identity() #reset the transform
"""
def __init__(self):
self.m_11 = 1.0
self.m_12 = 0.0
self.m_21 = 0.0
self.m_22 = 1.0
self.o_11 = None
self.o_12 = None
self.o_21 = None
self.o_22 = None
self.store11 = None
self.store12 = None
self.store21 = None
self.store22 = None
self.unity = 1
def identity(self):
self.m_11 = 1.0
self.m_12 = 0.0
self.m_21 = 0.0
self.m_22 = 1.0
self.unity = 1
def set(self, a,b,c,d):
self.m_11 = a
self.m_12 = b
self.m_21 = c
self.m_22 = d
self.unity = 0
def rotate(self, angle):
self.o_11 = np.cos(angle)
self.o_12 = np.sin(angle)
self.o_21 = -np.sin(angle)
self.o_22 = np.cos(angle)
self.unity = 0
self.multiply()
def stretch(self, x, y):
self.o_11 = x
self.o_12 = 0.0
self.o_21 = 0.0
self.o_22 = y
self.unity = 0
self.multiply()
def multiply(self):
# in-place 2x2 matrix product: M <- M * O, where O was set by rotate/stretch/__imul__
self.store11 = self.m_11 * self.o_11 + self.m_12 * self.o_21
self.store12 = self.m_11 * self.o_12 + self.m_12 * self.o_22
self.store21 = self.m_21 * self.o_11 + self.m_22 * self.o_21
self.store22 = self.m_21 * self.o_12 + self.m_22 * self.o_22
self.m_11 = self.store11
self.m_12 = self.store12
self.m_21 = self.store21
self.m_22 = self.store22
def __mul__(self, T):
"""
Be aware, this instantiates a new Trafo
upon each multiplication; not very memory
efficient...
"""
self.o_11 = T.m_11
self.o_12 = T.m_12
self.o_21 = T.m_21
self.o_22 = T.m_22
res = Trafo()
res.m_11 = self.m_11 * self.o_11 + self.m_12 * self.o_21
res.m_12 = self.m_11 * self.o_12 + self.m_12 * self.o_22
res.m_21 = self.m_21 * self.o_11 + self.m_22 * self.o_21
res.m_22 = self.m_21 * self.o_12 + self.m_22 * self.o_22
res.unity = 0
return res
def __imul__(self, T):
self.o_11 = T.m_11
self.o_12 = T.m_12
self.o_21 = T.m_21
self.o_22 = T.m_22
self.unity = 0
self.multiply()
return self
def __pow__(self, T):
pass
class Trafo3(object):
"""
Transforms, or camera views, for 2d-projected
3d objects.
args:
teta{x,y,z} rotation angles of the object around its own origin
cteta{x,y,z} camera rotation angles
"""
def __init__(self, tetax,tetay,tetaz,
ctetax,ctetay,ctetaz,
ex,ey,ez,cx,cy,cz):
self.tetax = tetax
self.tetay = tetay
self.tetaz = tetaz
self.ctetax = ctetax
self.ctetay = ctetay
self.ctetaz = ctetaz
self.ex = ex
self.ey = ey
self.ez = ez
self.cx = cx
self.cy = cy
self.cz = cz
class DDDObject(object):
polynr=0
def __init__(self,x,y,z,surf):
self.polys = Polys()
self.polys.x = x
self.polys.y = y
self.polys.z = z
self.surface = surf
self.mynumber = None
self.order = []
self.anglex = 0
self.angley = 0
self.anglez = 0
self.tx = 0
self.ty = 0
self.tz = 0
self.init()
@classmethod
def increment(cls):
cls.polynr +=1
def init(self):
self.surface.informdriver()
fblib.fbinit()
for i in self.polys.getthem():
fblib.fbaddpoly(i[0],i[1],i[2],self.polynr)
self.mynumber = self.polynr
self.increment()
print self.mynumber
#def settrafransform(self):
# self.surface.informdriver()
# fblib.fbtransform3d(self.anglex, self.angley,self.anglez,
# self.tx,self.ty,self.tz,
# np.array(self.order, dtype=np.int32),self.mynumber)
def draw(self):
self.surface.informdriver()
fblib.fbsettrafoL( self.anglex, self.angley, self.anglez,
self.tx, self.ty, self.tz,
np.array(self.order, dtype=np.int32),len(self.order))
fblib.fbdraw3dpolys(self.mynumber)
@property
def tetax(self):
return self.anglex
@tetax.setter
def tetax(self, x):
self.order.append(1)
self.anglex = x
@property
def tetay(self):
return self.angley
@tetay.setter
def tetay(self, y):
self.order.append(2)
self.angley = y
@property
def tetaz(self):
return self.anglez
@tetaz.setter
def tetaz(self, z):
self.order.append(3)
self.anglez = z
@property
def dx(self):
return self.tx
@dx.setter
def dx(self,tx):
self.order.append(4)
self.tx = tx
@property
def dy(self):
return self.ty
@dy.setter
def dy(self,ty):
self.order.append(5)
self.ty = ty
@property
def dz(self):
return self.tz
@dz.setter
def dz(self,tz):
self.order.append(6)
self.tz = tz
class Surface(object):
"""
This is the main class, it generates a drawing surface.
On first invocation, it will generate a surface which
encompasses the entire screen automatically *and* it
will open the framebuffer device.
The *classmethod* close will close it.
Subsequent instances will need arguments defining size and
position.
"""
__metaclass__ = Uniton
#__doc__ = docme()
instances = []
scr_width = 0
scr_height = 0
def __init__(self, *args):
self.origo = (0,0)
self.size = None
self.sprite = None
self.pixelstyle = Pixelstyle()
self.trafo = Trafo()
self.dddtrafo = Trafo3( 0.0,0.0,0.0,
0.0,0.0,0.0,
0.0,0.0,1.0,
1.0,1.0,1.0)
if len(args)==2:
if isinstance(args[0], tuple) and isinstance(args[1],tuple):
self.origo = args[0]
self.size = args[1]
self.informdriver()
self.polys = Polys()
self.objects = []
def informdriver(self):
"""
pass relevant class info to
fbutils driver,
this is how one 'instance' of the
driver can serve multiple Surface
instances
.. doctest::
>>> import fbpy.fb as fb
>>> main = fb.Surface()
>>> main.informdriver()
"""
a=fblib.fbsetwinparams( self.origo[0], self.origo[1], self.size[0], self.size[1],
self.pixelstyle.color.r,
self.pixelstyle.color.g,
self.pixelstyle.color.b,
self.pixelstyle.color.a,
self.pixelstyle.style,
self.pixelstyle.blur, self.pixelstyle.blurradius,
self.pixelstyle.sigma)
b=fblib.fbsettrafo( self.trafo.m_11, self.trafo.m_12,
self.trafo.m_21, self.trafo.m_22,
self.trafo.unity)
fblib.fbsettrafo3( self.dddtrafo.tetax, self.dddtrafo.tetay,self.dddtrafo.tetaz,
self.dddtrafo.ctetax, self.dddtrafo.ctetay,self.dddtrafo.ctetaz,
self.dddtrafo.ex,self.dddtrafo.ey,self.dddtrafo.ez,
self.dddtrafo.cx,self.dddtrafo.cy,self.dddtrafo.cz)
window = fblib.fbgetwinparams()
origos = (window[0] == self.origo[0]) and (window[1] == self.origo[1])
sizes = (window[2] == self.size[0]) and (window[3] == self.size[1])
if origos and sizes: return 0
else: return -1
@classmethod
def _setupfb(cls):
fblib.fbsetup()
cls.scr_width = fblib.fbgetWidth()
cls.scr_height = fblib.fbgetHeight()
@property
def focus(self):
return -1
@focus.setter
def focus(self, r):
self.blurradius = r
fblib.fbfocus(r)
@property
def winsize(self):
return (self.origo,self.size)
@winsize.setter
def winsize(self, X):
x0 = X[0]
y0 = X[1]
w = X[2]
h = X[3]
self.origo = (x0,y0)
self.size = (w,h)
self.informdriver()
def _set_color_(self, color):
fblib.fbsetcolor(color.r,color.g,color.b,color.a)
def _set_color2_(self, color):
fblib.fbsetcolor2(color.r,color.g,color.b,color.a)
def _set_style_(self, style_):
fblib.fbsetstyle(style_)
def set_dotstyle(self, dotstyle, blurrad_):
"""
set_dotstyle(<dotstyle>, <blur radius>)
dotstyle 0 : fast plot
dotstyle 1 : plot with soft alpha
dotstyle 2 : plot with blur + soft alpha
blur radius: well, 2 sigma ^2 it is
"""
self.blur = dotstyle
self.blurradius = blurrad_
self.informdriver()
def __repr__(self):
message = """
framebuffer surface object:\n
origin X:{0}\n
origin Y:{1}\n
width :{2}\n
height :{3}\n
"""
return message.format(self.origo[0], self.origo[1], self.size[0], self.size[1])
def clear(self):
"""
will clear the temp buffer
"""
self.informdriver()
return fblib.fbclearbuffer()
def clearframebuffer(self):
"""
will clear the framebuffer
but not the temp buffer.
Use clearscreen for a clear screen,
or clear to clear the temp buffer
"""
self.informdriver()
fblib.fbclearscreen()
return 0
def clearscreen(self):
"""
will clear the screen,
that is, swap buffer + actual frameb
"""
self.informdriver()
fblib.fbclearbuffer()
fblib.fbclearscreen()
return 0
def styledredraw(self):
self.informdriver()
return fblib.fbstyledredraw()
def update(self):
"""
update()
draws the buffered geometries. So, you need this before you actually see
anything
"""
self.informdriver()
return fblib.fbupdate()
def keepbackground(self):
self.informdriver()
return fblib.fbkeepcurrent()
def store(self):
self.sprite = self.get_raw()
return 0
def restore(self):
self.set_raw(self.sprite)
return 0
def overlay(self, res_buf, sprite, oldx, oldy, mode):
self.informdriver()
#length = self.size[0]*self.size[1]*4
#res_buf = np.zeros(length, dtype=np.int8)
return fblib.fboverlay(res_buf, sprite, oldx, oldy, mode)
def get_raw(self):
"""
get_raw()
returns a raw bitmap array of the current window; use
set_raw to put the bitmap back.
.. code-block:: python
sprite = main.get_raw()
main.set_raw(sprite)
"""
self.informdriver()
length = self.size[0]*self.size[1]*4
sprite = np.zeros(length, dtype=np.int8)
fblib.fbgetraw(sprite)
return sprite
def set_raw(self, sprite):
"""
set_raw(sprite)
puts the bitmap array into the buffer, see get_raw.
"""
self.informdriver()
fblib.fbsetraw(sprite)
def swap(self, page):
fblib.fbswap(page)
def fill(self,color):
self._set_color_(color)
fblib.fbclearscreen()
@Bounds
def poly(self, xdata, ydata):
"""
poly(<xdata numpy array>, <ydata numpy array>)
x, y will be the points, have to be the same length and type
style = 0, 1, 2
0: solid line
1: dashed line
2: dotted line
.. doctest::
>>> import fbpy.fb as fb
>>> import numpy as np
>>> x = np.arange(0, 1,0.01)
>>> y = 0.5*np.sin(x*2*2*np.pi) + 0.5
>>> main = fb.Surface()
>>> subwin = fb.Surface((0,0),(200,200))
>>> subwin.clear()
0
>>> subwin.pixelstyle = fb.Pixelstyles.faint
>>> subwin.poly(x, y)
0
>>> subwin.grabsilent("./source/images/poly.png")
0
.. image:: ./images/poly.png
"""
if isinstance(xdata, np.ndarray) and isinstance(ydata, np.ndarray):
if xdata.dtype == np.int32 and ydata.dtype == np.int32:
pass
else:
raise NameError("something wrong with the array")
else:
xdata = np.array(xdata, dtype=np.int32)
ydata = np.array(ydata, dtype=np.int32)
if len(xdata) == len(ydata):
fblib.fbpoly(xdata, ydata)
else:
raise NameError("SIZE MISSSMATCH")
return 0
@Bounds
def addpoly(self, x, y, z):
"""
just a test for the moment
I have to store this in this
instance...
and then on draw3dpolys should I
call the drivers addpoly!!!
addpoly(<x array>, <y array>, <z array>)
"""
if isinstance(x,np.ndarray) and isinstance(y, np.ndarray) and isinstance(z,np.ndarray):
x1 = x
y1 = y
z1 = z
else:
x1 = np.array(x, dtype=np.int32)
y1 = np.array(y, dtype=np.int32)
z1 = np.array(z, dtype=np.int32)
return fblib.fbaddpoly(x1,y1,z1)
#self.polys.x.append(x1)
#self.polys.y.append(y1)
#self.polys.z.append(z1)
return 0
def drawpolys(self):
"""
Draw a bunch of polygons
.. doctest::
>>> import fbpy.fb as fb
>>> import numpy as np
>>> main = fb.Surface()
>>> main.clear()
>>> sub = fb.Surface((100,100),(200,200))
>>> sub.clear()
0
>>> x1 = np.arange(0,1,0.02)
>>> y1 = 0.5*np.sin(x1*2*np.pi)+0.5
>>> z1 = np.zeros(np.size(x1))
>>> x2 = np.arange(0,1,0.02)
>>> y2 = 0.5*np.cos(x2*2*np.pi)+0.5
>>> z2 = np.zeros(np.size(x2))
>>> sub.addpoly(x1,y1,z1)
0
>>> sub.addpoly(x2,y2,z2)
0
>>> sub.drawpolys()
0
>>> sub.trafo.rotate(np.pi/2)
0
>>> sub.drawpolys()
0
>>> sub.grabsilent("./source/images/polys.png")
0
.. image:: ./images/polys.png
"""
self.informdriver()
return fblib.fbdrawpolys()
def draw3dpolys(self, resend, polynr):
self.informdriver()
#now I can upload the polys to the driver:
if resend==1:
#fblib.fbfreepolys()
fblib.fbinit()#set visit=0
for i in self.polys.getthem():
fblib.fbaddpoly(i[0],i[1],i[2],polynr)
fblib.fbdraw3dpolys(polynr)
def lintrafo(self,tx,ty,tz,dx,dy,dz,order,polynr):
self.informdriver()
return fblib.fbtransform3d(tx,ty,tz,dx,dy,dz,
np.array(order,dtype=np.int32),polynr)
def dumppolys(self):
"""
print information about the currently loaded
multipoly struct.
"""
fblib.fbprintapoly()
def freepolys(self):
fblib.fbfreepolys()
@Bounds
def line(self,X1, X2):
"""
line(<tuple crd from>,<tuple crd to>)
or
"""
fblib.fbline(X1[0], X1[1], X2[0], X2[1])
return 0
@Bounds
def arc(self, X1, R1, R2, startseg, endseg, segs):
"""
arc(<tuple>, <radius 1>, <radius 2>, <start seg>, <end seg>, <no seg>)
couple of examples here:
.. doctest::
>>> import fbpy.fb as fb
>>> main = fb.Surface()
>>> sub = fb.Surface((0,0), (200,200))
>>> sub.clear()
0
>>> sub.pixelstyle = fb.Pixelstyles.faint
>>> sub.arc((100,100), 60, 90, 0, 50, 100)
0
>>> sub.pixelstyle = fb.Pixelstyles.sharp
>>> sub.arc((100,100), 40, 40, 30, 90, 100)
0
>>> sub.grabsilent("./source/images/arc.png")
0
.. image:: ./images/arc.png
"""
if type(R1) == float:
rx=self.size[0] * R1/2
else:
rx = R1
if type(R2) == float:
ry=self.size[1] * R2/2
else:
ry = R2
fblib.fbarc(X1[0], X1[1], rx, ry, startseg, endseg, segs)
return 0
@Bounds
def circle(self, X1, R1, segs):
"""
circle(<tuple>,<radius>, <segments>)
Will draw a circle; the radius is given as a fraction of the surface size.
.. doctest::
>>> import fbpy.fb as fb
>>> main = fb.Surface()
>>> sub = fb.Surface((0,0), (200,200))
>>> sub.clear()
0
>>> sub.circle((100,100),0.5, 100)
0
>>> sub.grabsilent("./source/images/circle.png")
0
.. image:: ./images/circle.png
"""
fblib.fbarc(X1[0],X1[1],R1*self.size[0],R1*self.size[1],0,segs,segs)
return 0
@Bounds
def rect(self, X1, X2):
"""
rect(<tuple>, <tuple>)
Will draw a rectangle with opposite corners at the first
and second tuples
"""
fblib.fbline(X1[0],X1[1],X2[0],X1[1])
fblib.fbline(X1[0],X1[1],X1[0],X2[1])
fblib.fbline(X1[0],X2[1],X2[0],X2[1])
fblib.fbline(X2[0],X1[1],X2[0],X2[1])
#return Rect(self, X1, X2, color)
return 0
@Bounds
def printxy(self, X1, string, size_):
"""
printxy (<tuple>, <string>, <size>)
Will print text in string at position defined by tuple (x, y).
Size can be 1 or 2, where 2 prints triple sized LCD-like format
returns 0
.. doctest::
>>> import fbpy.fb as fb
>>> main = fb.Surface()
>>> sub = fb.Surface((0,0),(800,100))
>>> sub.clear()
0
>>> sub.printxy((10,10),"Hello world!", 2)
0
>>> sub.printxy((10,38),"or a bit smaller...", 1)
0
>>> sub.pixelstyle.color = fb.Color(20,20,20,100)
>>> sub.pixelstyle.blur = 2
>>> sub.pixelstyle.blurradius = 4
>>> sub.pixelstyle.sigma = 1
>>> sub.printxy((10,76),"where R them goggles...", 1)
0
>>> sub.grabsilent("./source/images/printxy.png")
0
.. image:: ./images/printxy.png
"""
fblib.fbprint(X1[0],X1[1], string, size_)
return 0
@Bounds
def graticule(self, X1, WH):
"""
graticule(<tuple>, <tuple>)
draws a scope-like graticule at the first tuple, of size given by the second tuple
(width/height); the surface pixelstyle colors are used (color = subdivisions, color2 = main grid)
returns 0
.. doctest::
>>> import fbpy.fb as fb
>>> main = fb.Surface()
>>> sub2 = fb.Surface((0,0),(200,200))
>>> sub2.clear() == 0
True
>>> sub2.pixelstyle.color = fb.Color(200,200,200,00)
>>> sub2.fillrect((0,0),(200,200)) == 0
True
>>> sub2.pixelstyle.color = fb.Colors.white
>>> sub2.graticule((0.0,0.0),(1.0,1.0)) == 0
True
>>> sub2.grabsilent("./source/images/graticule.png") == 0
True
.. image:: ./images/graticule.png
"""
fblib.fbgraticule(X1[0],X1[1],WH[0]-X1[0],WH[1]-X1[1])
return 0
@Bounds
def fillrect(self, X1, WH):
fblib.fbfillrect(X1[0],X1[1],WH[0],WH[1])
return 0
def snow(self):
"""
snow()
show some noise...
.. doctest::
>>> import fbpy.fb as fb
>>> main = fb.Surface()
>>> sub = fb.Surface((0,0),(200,200))
>>> sub.clear()
0
>>> sub.pixelstyle = fb.Pixelstyles.faint
>>> sub.snow()
0
>>> sub.grabsilent("./source/images/snow.png")
0
.. image:: ./images/snow.png
"""
self.informdriver()
fblib.fbsnow()
return 0
@Bounds
def point(self, X1):
fblib.fbplot(X1[0],X1[1])
#return Point(self, X1, color)
def add(self, x):
if isinstance(x, Line): print "Adding line"
self.objects.append(x)
def blit(self, filename):
"""
blit(<filename>)
will put the PNG <filename> in the current surface
.. doctest::
>>> import fbpy.fb as fb
>>> main = fb.Surface()
>>> sub = fb.Surface((100,100),(600,600))
>>> sub.blit("../examples/cylon.png")
0
>>> sub.grabsilent("./source/images/gottherobot.png")
0
.. image:: ./images/gottherobot.png
"""
self.informdriver()
return fblib.fbblit(filename)
def grab(self,filename):
"""
grab(<filename>)
grabs current frame into file <filename>.png
"""
self.informdriver()
return fblib.fbgrab(filename)
def grabsilent(self, filename):
"""
grabsilent(<filename>)
grabs current buffer into file <filename>.png
so, if you don't use update, you'll never actually
*see* the drawing. Handy for doctest stuff
or other apps where you *only* want to make
pics..
"""
self.informdriver()
return fblib.fbgrabsilent(filename)
def grabsequence(self, filename):
"""
grabsequence(<filename>)
grabs current frame into file with filename <filename#>
where # is an automatic counter. The output will be e.g.:
screenshot0001.png, screenshot0002.png, ...
you can use e.g.
.. code-block:: console
nerd@wonka: ~/tmp$ avconv -i <filename>%04d.png -c:v huffyuv <yourmoviename>.avi
to convert the sequence to a movie.
You can of course also use something like
.. code-block:: console
nerd@wonka: ~/tmp$ avconv -f fbdev -r 10 -i /dev/fb0 -c:v huffyuv /dev/shm/movi.avi 2> /dev/null
"""
numbered_filename = ("{}{:04d}.png".format(filename, i) for i in count(1))
try_this = next(numbered_filename)
while os.path.isfile(try_this):
try_this = next(numbered_filename)
self.grab(try_this)
return 0
def redraw(self):
for i, elem in enumerate(self.objects):
elem.redraw()
def something(self):
"""
.. doctest::
>>> print "Hello from a doctest.."
Hello from a doctest..
"""
def icopyu(self, othersurf):
self.origo = copy.deepcopy(othersurf.origo)
self.size = copy.deepcopy(othersurf.size)
self.pixelstyle = copy.deepcopy(othersurf.pixelstyle)
self.trafo = copy.deepcopy(othersurf.trafo)
self.informdriver()
self.sprite = othersurf.get_raw()
@classmethod
def isalive(self):
if self._Uniton__instance == None: return False
else: return True
@classmethod
def close(self):
self._Uniton__instance = None
fblib.fbclose()
return 0
class Scope(Surface):
def __init__(self):
pass
if __name__ == '__main__':
pass
| noisegate/fbpy | build/lib.linux-x86_64-2.7/fbpy/fb.py | Python | gpl-2.0 | 33,713 | ["VisIt"] | e987213d737dff4bd7e4a55d4aace4cab771dd48ae2c388779b68ee4dda9e97c |
"""
Flask server used for the registration page for **COMEX** app
Context:
- templates-based input form validated fields and checked if all were present
(base_form.html + static/js/comex_reg_form_controllers.js)
- Doors already validated and recorded the (email, password) combination
- labels of SOURCE_FIELDS are identical to the name of their target COLS
- fields will land into the 3 main db tables:
- *scholars*
- *affiliations*
- *keywords* (and *sch_kw* mapping table)
- a few fields give no columns, for ex: "other_org_type"
- webapp is exposed as "server_comex_registration.app" for the outside
- can be served in dev by python3 server_comex_registration.py
- better to serve it via gunicorn (cf run.sh)
"""
__author__ = "CNRS"
__copyright__ = "Copyright 2016 ISCPIF-CNRS"
__version__ = "1.5"
__email__ = "romain.loth@iscpif.fr"
__status__ = "Dev"
# ============== imports ==============
from re import sub, match
from os import path, remove
from json import dumps
from datetime import timedelta
from urllib.parse import unquote
from flask import Flask, render_template, request, \
redirect, url_for, session, Response
from flask_login import fresh_login_required, login_required, \
current_user, login_user, logout_user
if __package__ == 'services':
# when we're run via import
print("*** comex services ***")
from services.tools import mlog
from services import tools, dbcrud, dbdatapi
from services.user import User, login_manager, \
doors_login, doors_register
from services.text.utils import sanitize
else:
# when this script is run directly
print("*** comex services (dev server mode) ***")
from tools import mlog
import tools, dbcrud, dbdatapi
from user import User, login_manager, \
doors_login, doors_register
from text.utils import sanitize
# ============= app creation ============
config = tools.REALCONFIG
app = Flask("services",
static_folder=path.join(config['HOME'],"static"),
template_folder=path.join(config['HOME'],"templates"))
app.config['DEBUG'] = (config['LOG_LEVEL'] in ["DEBUG","DEBUGSQL"])
app.config['SECRET_KEY'] = config['PASSPHRASE']
# for SSL
app.config['PREFERRED_URL_SCHEME'] = 'https'
# for flask_login
cookie_timer = timedelta(days=7)
app.config['PERMANENT_SESSION_LIFETIME'] = cookie_timer
app.config['REMEMBER_COOKIE_DURATION'] = cookie_timer
app.config['REMEMBER_COOKIE_NAME'] = 'communityexplorer.org cookie'
app.config['REMEMBER_COOKIE_SECURE'] = True # we remember session only if https
login_manager.login_view = "login"
login_manager.session_protection = "strong"
login_manager.init_app(app)
########### PARAMS ###########
# all inputs as they are declared in form, as a couple
SOURCE_FIELDS = [
# NAME, SANITIZE? sanitizing specificity
("luid", False, None),
("doors_uid", False, None),
("email", True, None),
("country", True, "scountry"),
("first_name", True, None),
("middle_name", True, None),
("last_name", True, None),
("initials", True, None),
# => for *scholars* table
("position", True, None),
("hon_title", True, None),
("interests_text", True, None),
("gender", False, None), # M|F
("job_looking", True, "sbool"),
("job_looking_date", True, "sdate"),
("home_url", True, "surl"), # scholar's homepage
("pic_url", True, "surl"),
("pic_file", False, "sblob", "pic", "pic_fname"), # saved separately
# => for *scholars* table (optional)
("lab_label", True, "sorg"), # ~ /name (acro)?/
("lab_locname", True, None), # 'Paris, France'
("inst_label", True, "sorg"), # ~ /name (acro)?/
("inst_type", False, None), # predefined values
( "other_inst_type", True, None), # +=> org_type
# => for *orgs* table via parse_affiliation_records
("keywords", True, None),
# => for *keywords* table (after split str)
("hashtags", True, None)
# => for *hashtags* table (after split str)
]
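# (added note) most tuples read (form field name, sanitize?, sanitizing specificity),
# with file fields carrying extra storage hints; e.g. ("country", True, "scountry")
# means the POSTed 'country' value is run through what is presumably a country-specific
# sanitizer before reaching its DB column.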
# NB password values have already been sent by ajax to Doors
# sanitization params
JOB_FIELDS = [
# NAME, SANITIZE? sanitizing specificity
("uid", False, None),
("jtitle", True, None),
("mission_text", True, None),
("recruiter_org_text", True, None),
("email", True, None),
("locname", True, None),
("country", True, "scountry"),
("job_valid_date", True, "sdate"),
("job_type", False, None), # predefined values
("pdf_attachment", True, "sblob", "pdf", "pdf_fname"), # saved separately
# => for *jobs* table
("keywords", True, None)
# => for *keywords* table (after split str)
]
# ============= context =============
@app.context_processor
def inject_doors_params():
"""
Keys will be available in *all* templates
-> 'doors_connect'
(base_layout-rendered templates need it for login popup)
"""
if 'DOORS_PORT' not in config or config['DOORS_PORT'] in ['80', '443']:
context_dict = {
'doors_connect': config['DOORS_HOST'],
'doors_scheme': 'http:' if config['DOORS_NOSSL'] else 'https:'
}
else:
context_dict = {
'doors_connect': config['DOORS_HOST']+':'+config['DOORS_PORT'],
'doors_scheme': 'http:' if config['DOORS_NOSSL'] else 'https:'
}
return context_dict
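# illustrative result (hypothetical values, added): with DOORS_HOST='doors.example.org'
# and DOORS_PORT='8989', templates see doors_connect='doors.example.org:8989' and
# doors_scheme='https:' (or 'http:' when DOORS_NOSSL is set).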
@login_manager.unauthorized_handler
def unauthorized():
"""
Generic handler for all unauthorized
(pages requiring login)
NB: Redirecting here is done by @login_required decorators
"""
return render_template(
"message.html",
message = """
Please <strong>
<a onclick="cmxClt.elts.box.toggleBox('auth_modal',{'nextPage':'%(tgt)s'})">login here</a>
</strong>.
<br/><br/>
The page <span class='code'>%(tgt)s</span> is only available after login.
""" % {'tgt': request.path,
'login': url_for('login', next=request.path, _external=True)}
)
def reroute(function_name_str):
return redirect(url_for(function_name_str, _external=True))
# ============= views =============
# -----------------------------------------------------------------------
# /!\ Routes are not prefixed by nginx in prod so we do it ourselves /!\
# -----------------------------------------------------------------------
@app.route("/")
def rootindex():
"""
Root of the comex2 app (new index)
Demo CSS with alternative index (top like old index, then underneath some layout à la notebook)
also useful to be able to url_for('rootindex') when redirecting to php
"""
return render_template(
"rootindex.html"
)
# /services/
@app.route(config['PREFIX']+'/')
def services():
return reroute('login')
# /services/api/aggs
@app.route(config['PREFIX'] + config['API_ROUTE'] + '/aggs')
def aggs_api():
"""
API to read DB aggregation data (ex: for autocompletes)
REST params
like:str an optional filter for select
hapax:int an optional min count threshold
"""
if 'field' in request.args:
search_filter = None
hap_thresh = None
if 'like' in request.args:
try:
search_filter = str(request.args['like'])
except:
pass
if 'hapax' in request.args:
try:
hap_thresh = int(request.args['hapax'])
except:
pass
if hap_thresh is None:
hap_thresh = int(config['HAPAX_THRESHOLD'])
# field name itself is tested by db module
result = dbdatapi.get_field_aggs(
request.args['field'],
search_filter_str=search_filter,
hapax_threshold=hap_thresh
)
return Response(
response=dumps(result),
status=200,
mimetype="application/json")
else:
raise TypeError("aggs API query is missing 'field' argument")
# /services/api/jobmatch
@app.route(config['PREFIX'] + config['API_ROUTE'] + '/jobmatch')
def jobmatch():
"""
Demo API for a custom multimatch graph on keywords pivot
POSS: factorize with multimatch by making params from pivot (and filters ?)
"""
graph = {'links':{}, 'nodes':{}}
try:
graph = dbdatapi.jobmatch()
except Exception:
pass
return(
Response(
response=dumps(graph),
status=200,
mimetype="application/json")
)
# /services/api/multimatch
@app.route(config['PREFIX'] + config['API_ROUTE'] + '/multimatch')
def multimatch_graph_api():
"""
API to provide json extracts of the DB to tinaweb
(uses the new approach dbdatapi.multimatch)
"""
graph = {'links':{}, 'nodes':{}}
supported_types = ["sch","lab" ,"inst","kw" ,"ht" ,"country", "jobs_and_candidates"]
# default types
type0 = 'kw'
type1 = 'sch'
# constraints
sql_filters = []
# default pivot
pivot_type = 'scholars'
if 'type0' in request.args and 'type1' in request.args:
type0 = request.args['type0']
type1 = request.args['type1']
if 'pivot_type' in request.args and request.args['pivot_type'] in ['scholars', 'keywords']:
pivot_type = request.args['pivot_type']
mlog("INFO", "multimatch query for", type0, type1, 'via', pivot_type)
if type0 in supported_types and type1 in supported_types:
if 'qtype' in request.args:
if request.args['qtype'] == 'filters':
# query is a set of filters like: key <=> array of values
# (expressed as rest parameters: "keyA[]=valA1&keyB[]=valB1&keyB[]=valB2")
# 1. we create a representation as dict of arrays
filterq_dict = tools.restparse(request.query_string.decode())
# 2. we map it to an sql conjunction of alternatives
# ==> WHERE colA IN ("valA1") AND colB IN ("valB1", "valB2")
sql_filters = dbdatapi.rest_filters_to_sql(filterq_dict)
elif request.args['qtype'] == "uid" and 'unique_id' in request.args:
# query is the id of a unique scholar
# 1. remove quotes from id
unique_id = int(sub(r'^"|"$', '', request.args['unique_id']))
# 2. get the direct neighbors of the scholar
# 3. use them as an 'IN' filter :/
neighbors = dbdatapi.kw_neighbors(unique_id)
sql_filters = ["full_scholar.luid IN %s" % (
'('+','.join([str(nei["uid"]) for nei in neighbors])+')'
)]
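# (added) e.g. for neighbor uids 12, 34 and 56 this produces the single filter
# "full_scholar.luid IN (12,34,56)"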
mlog("INFO","query => SQL", sql_filters)
graph = dbdatapi.multimatch(
type0,
type1,
pivot_filters = sql_filters,
pivot_type = pivot_type
)
return(
Response(
response=dumps(graph),
status=200,
mimetype="application/json")
)
# /services/api/graph
@app.route(config['PREFIX'] + config['API_ROUTE'] + '/graph')
def graph_api():
"""
API to provide json extracts of the DB to tinaweb
(originally @ moma/legacy_php_comex/tree/master/comex_install)
(original author S. Castillo)
"""
if 'qtype' in request.args:
graphdb = dbdatapi.BipartiteExtractor(config['SQL_HOST'])
# request.query_string
# => b'qtype=filters&tags[]=%23iscpif'
# tools.restparse(request.query_string.decode())
# => {'qtype': 'filters', 'tags': ['#iscpif']}
scholars = graphdb.getScholarsList(
request.args['qtype'],
tools.restparse(
request.query_string.decode()
)
)
if scholars and len(scholars):
# Data Extraction
# (getting details for selected scholars into graph object)
# when filtering, TODO do it along with previous step getScholarsList
# (less modular but a lot faster)
graphdb.extract(scholars)
return(
Response(
response=dumps(graphdb.buildJSON(graphdb.Graph)),
status=200,
mimetype="application/json")
)
else:
raise TypeError("graph API query is missing qtype (should be 'filters' or 'uid')")
# /services/api/user
@app.route(config['PREFIX'] + config['API_ROUTE'] + '/user')
def user_api():
"""
API to provide json infos about user DB
implemented "op" <=> verbs:
exists => bool
"""
if 'op' in request.args:
if request.args['op'] == "exists":
if 'email' in request.args:
email = sanitize(request.args['email'])
return(
Response(
response=dumps({'exists':dbcrud.email_exists(email)}),
status=200,
mimetype="application/json")
)
else:
raise TypeError("user API query is missing the operation to perform (eg op=exists)")
# /services/user/
@app.route(config['PREFIX'] + config['USR_ROUTE']+'/', methods=['GET'])
def user():
return reroute('login')
# /services/user/login/
@app.route(config['PREFIX'] + config['USR_ROUTE'] + '/login/', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
return render_template(
"login.html"
)
elif request.method == 'POST':
mlog("DEBUG", "LOGIN: form received from "+request.path+", with keys:", [k for k in request.values])
# we used this custom header to mark ajax calls => called_as_api True
x_req_with = request.headers.get('X-Requested-With', type=str)
called_as_api = (x_req_with in ['XMLHttpRequest', 'MyFetchRequest'])
# testing the captcha answer
captcha_userinput = request.form['my-captcha']
captcha_userhash = tools.re_hash(captcha_userinput)
captcha_verifhash = int(request.form['my-captchaHash'])
# dbg
# mlog("DEBUG", "login captcha verif", str(captcha_verifhash))
# mlog("DEBUG", "login captcha user", str(captcha_userhash))
if captcha_userhash != captcha_verifhash:
mlog("WARNING", "pb captcha rejected")
return render_template(
"message.html",
message = """
We're sorry the "captcha" information you entered was wrong!
<br/>
<strong><a href="%s">Retry login here</a></strong>.
""" % url_for('login', _external=True)
)
else:
# OK captcha accepted
email = request.form['email']
pwd = request.form['password']
# we do our doors request here server-side to avoid MiM attack on result
try:
doors_uid = doors_login(email, pwd, config)
except Exception as err:
mlog("ERROR", "LOGIN: error in doors_login request")
raise (err)
mlog("DEBUG", "user.doors_login() returned doors_uid '%s'" % doors_uid)
if doors_uid is None:
# break: can't doors_login
nologin_message = """<b>The login exists but it was invalid!</b><br/>Perhaps the password was wrong ?<br/>Or perhaps you never checked your mailbox and clicked on the validation link ?"""
if called_as_api:
# menubar login will prevent redirect
return(nologin_message, 404)
else:
return render_template(
"message.html",
message = nologin_message
)
luid = dbcrud.doors_uid_to_luid(doors_uid)
if luid:
# normal user
user = User(luid)
else:
mlog("DEBUG", "LOGIN: encountered new doors id (%s), switching to empty user profile" % doors_uid)
# user exists in doors but has no comex profile nor luid yet
dbcrud.save_doors_temp_user(doors_uid, email) # preserve the email
user = User(None, doors_uid=doors_uid) # get a user.empty
# ==================================================================
login_ok = login_user(user, remember=True)
# -------------
# creates REMEMBER_COOKIE_NAME
# (keep session open if cookie was sent in https)
# ==================================================================
mlog('INFO',
'login of %s (%s) was %s' % (str(luid),
doors_uid,
str(login_ok))
)
if not login_ok:
# break: failed to login_user()
notok_message = "LOGIN There was an unknown problem with the login."
if called_as_api:
# menubar login will prevent redirect
return(notok_message, 404)
else:
return render_template(
"message.html",
message = notok_message
)
# ========
# OK cases
# ========
if called_as_api:
# menubar login will do the redirect
return('', 204)
elif user.empty:
mlog('DEBUG',"empty user redirected to profile")
# we go straight to empty profile for the person to create infos
return reroute('profile')
# normal call, normal user
else:
mlog('DEBUG', "normal user login redirect")
next_url = request.args.get('next', None)
if not next_url:
return reroute('profile')
else:
next_url = unquote(next_url)
mlog("DEBUG", "login with next_url:", next_url)
safe_flag = tools.is_safe_url(next_url, request.host_url)
# normal next_url
if safe_flag:
# if relative
if next_url[0] == '/':
next_url = url_for('rootindex', _external=True) + next_url[1:]
mlog("DEBUG", "LOGIN: reabsoluted next_url:", next_url)
return(redirect(next_url))
else:
# server name is different than ours
# in next_url so we won't go there
return reroute('rootindex')
# /services/user/logout/
@app.route(config['PREFIX'] + config['USR_ROUTE'] + '/logout/')
def logout():
logout_user()
mlog('INFO', 'logged out previous user')
return reroute('rootindex')
# /services/user/profile/
@app.route(config['PREFIX'] + config['USR_ROUTE'] + '/profile/', methods=['GET', 'POST'])
@fresh_login_required
def profile():
"""
Entrypoint for users to load/re-save personal data
@login_required uses flask_login to relay User object current_user
"""
if request.method == 'GET':
# login provides us current_user
if current_user.empty:
mlog("INFO", "PROFILE: empty current_user %s" % current_user.uid)
else:
mlog("INFO", "PROFILE: current_user %s" % current_user.uid)
mlog("DEBUG", "PROFILE: current_user details: \n - %s" % (
'\n - '.join([current_user.info['email'],
current_user.info['initials'],
current_user.info['doors_uid'] if current_user.info['doors_uid'] else "(no doors_uid)" ,
str(current_user.info['keywords']),
current_user.info['country']]
)
)
)
# debug session cookies
# print("[k for k in session.keys()]",[k for k in session.keys()])
mlog("DEBUG", "PROFILE view with flag session.new = ", session.new)
return render_template(
"profile.html"
# NB we also got user info in {{current_user.info}}
# and {{current_user.json_info}}
)
elif request.method == 'POST':
mlog("DEBUG", "saving profile with request.form=", request.form)
# ajax calls get a little html, normal calls get a full page
x_req_with = request.headers.get('X-Requested-With', type=str)
called_as_api = (x_req_with in ['XMLHttpRequest', 'MyFetchRequest'])
answer_template = "bare_thank_you.html" if called_as_api else "thank_you.html"
mlog("DEBUG", "profile update flag called_as_api=", called_as_api)
# special action DELETE!!
if 'delete_user' in request.form and request.form['delete_user'] == 'on':
the_id_to_delete = current_user.uid
mlog("INFO",
"executing DELETE scholar's data at the request of user %s" % str(the_id_to_delete))
# remove saved image if any
if current_user.info['pic_fname']:
remove(path.join(*tools.IMAGE_SAVING_POINT, current_user.info['pic_fname']))
logout_user()
dbcrud.rm_scholar(the_id_to_delete)
return reroute('rootindex')
else:
# input fields data ~> normalized {cols:values}
our_records = read_record_from_request(request)
# small gotcha: absence of the file input is the default in GUI
# (even when we keep the image)
if 'pic_fname' not in our_records and 'pic_fname' in current_user.info and current_user.info['pic_fname']:
our_records['pic_fname'] = current_user.info['pic_fname']
# POSS:
# a dedicated button to indicate that the pic should be rm
# special action CREATE for a new user already known to doors
if current_user.empty:
mlog("DEBUG",
"create profile from new doors user %s" % current_user.doors_uid)
# remove the empty luid
our_records.pop('luid')
# add the doors_uid and doors_email to the form (same keynames!)
our_records = { **our_records, **current_user.doors_info }
try:
# *create* this user in our DB
luid = save_form(our_records, update_flag = False)
except Exception as perr:
return render_template("thank_you.html",
form_accepted = False,
backend_error = True,
debug_message = tools.format_err(perr)
)
# if all went well we can remove the temporary doors user data
dbcrud.rm_doors_temp_user(current_user.doors_uid)
logout_user()
# .. and login the user in his new mode
login_user(User(luid))
return render_template(
answer_template,
debug_records = (our_records if app.config['DEBUG'] else {}),
form_accepted = True,
backend_error = False
)
# normal action UPDATE
else:
try:
luid = save_form(our_records,
update_flag = True,
previous_user_info = current_user.info)
except Exception as perr:
return render_template(
answer_template,
form_accepted = False,
backend_error = True,
debug_message = tools.format_err(perr)
)
return render_template(
answer_template,
debug_records = (our_records if app.config['DEBUG'] else {}),
form_accepted = True,
backend_error = False
)
# /services/user/claim_profile/
@app.route(config['PREFIX'] + config['USR_ROUTE'] + '/claim_profile/', methods=['GET', 'POST'])
def claim_profile():
"""
For returning users (access by token as GET arg)
"""
if request.method == 'GET':
return_token = None
luid = None
# identify who came back from the return token
if 'token' in request.args:
return_token = sanitize(request.args['token'])
if (return_token
and type(return_token) == str
and len(return_token) == 36):
luid = dbcrud.get_legacy_user(return_token)
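# (added note, assumption) the 36-character length check above suggests the return
# token is expected to be a canonical UUID string; the actual token format is defined
# elsewhere in the codebase.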
if luid is not None:
try:
return_user = User(luid)
except ValueError:
return_user = None
# claim failure cases
if return_token is None or luid is None or return_user is None:
mlog('INFO', 'failed claim profile GET with return_token=%s, luid=%s' % (str(return_token),str(luid)))
return render_template(
"message.html",
message = """
<p><b>This activation link has already been used!</b></p>
<p>If you just created a new password for an archived profile:
</p>
<ol>
<li>go and click the validation link in your <b>confirmation email</b></li>
<li>then come back here to <span class='link-like' onclick="cmxClt.elts.box.toggleBox('auth_modal')">login</span></li>
</ol>
<br/>
<p>
Otherwise you can also register a completely new account via <span class='code'><a href="%(register_url)s">%(register_url)s</a></span>
</p>
""" % { 'register_url': url_for('register') }
)
# claim success
else:
mlog('DEBUG', "successful claim_profile GET for luid =", luid)
# we *don't* log him in but we do put his data as return_user
# => this way we can use templating to show the data
return render_template(
"claim_profile.html",
return_user = return_user
)
elif request.method == 'POST':
email = request.form['email']
pwd = request.form['password']
luid = request.form['return_user_luid']
return_user = User(luid)
info = return_user.info
name = info['last_name']+', '+info['first_name']
if info['middle_name']:
name += ' '+info['middle_name']
# we do our doors request here server-side to avoid MiM attack on result
try:
doors_uid = doors_register(email, pwd, name, config)
except Exception as err:
mlog("ERROR", "error in doors_register remote request")
raise (err)
mlog("DEBUG", "doors_register returned doors_uid '%s'" % doors_uid)
if doors_uid is None:
return render_template(
"thank_you.html",
form_accepted = False,
backend_error = True,
debug_message = "No ID was returned from the portal at registration"
)
else:
try:
db_connection = dbcrud.connect_db(config)
dbcrud.update_scholar_cols({
'doors_uid':doors_uid,
'record_status': 'active',
'valid_date': None
},
db_connection,
where_luid=return_user.uid)
db_connection.close()
# the user is not a legacy user anymore
# POSS: do this on first login instead
dbcrud.rm_legacy_user_rettoken(luid)
except Exception as perr:
return render_template(
"thank_you.html",
form_accepted = False,
backend_error = True,
debug_message = tools.format_err(perr)
)
mlog('DEBUG', "successful claim_profile for luid =", luid)
return render_template(
"message.html",
message = """
<p>Your new login credentials are saved. To complete your registration:</p>
<ol>
<li>go check your mailbox and click the link in your <b>confirmation email</b></li>
<li>then come back here to <span class='link-like' onclick="cmxClt.elts.box.toggleBox('auth_modal')">login</span> to your old profile with your new credentials.</li>
</ol>
"""
)
# /services/user/register/
@app.route(config['PREFIX'] + config['USR_ROUTE'] + '/register/', methods=['GET','POST'])
def register():
# debug
# mlog("DEBUG", "register route: ", config['PREFIX'] + config['USR_ROUTE'] + '/register')
if request.method == 'GET':
return render_template(
"registration_super_short_form.html"
)
elif request.method == 'POST':
# ex: request.form = ImmutableMultiDict([('initials', 'R.L.'), ('email', 'romain.loth@iscpif.fr'), ('last_name', 'Loth'), ('country', 'France'), ('first_name', 'Romain'), ('my-captchaHash', '-773776109'), ('my-captcha', 'TSZVIN')])
# mlog("DEBUG", "GOT ANSWERS <<========<<", request.form)
# 1 - testing the captcha answer
captcha_userinput = request.form['my-captcha']
captcha_userhash = tools.re_hash(captcha_userinput)
captcha_verifhash = int(request.form['my-captchaHash'])
# dbg
# mlog("DEBUG", str(captcha_verifhash))
if captcha_userhash != captcha_verifhash:
mlog("INFO", "pb captcha rejected")
form_accepted = False
# normal case
else:
mlog("INFO", "ok form accepted")
form_accepted = True
clean_records = {}
# 1) handles all the inputs from form
# (using SOURCE_FIELDS recreates USER_COLS)
clean_records = read_record_from_request(request)
try:
# 2) saves the records to db
luid = save_form(clean_records)
except Exception as perr:
return render_template(
"thank_you.html",
form_accepted = False,
backend_error = True,
debug_message = tools.format_err(perr)
)
# all went well: we can login the user
login_user(User(luid))
return render_template(
"thank_you.html",
debug_records = (clean_records if app.config['DEBUG'] else {}),
form_accepted = form_accepted,
backend_error = False,
message = """
You can now visit elements of the members section:
<ul style="list-style-type: none; font-size:140%%;">
<li>
<span class="glyphicon glyphicon glyphicon-education"></span>
<a href="/services/user/profile"> Your Profile </a>
</li>
<li>
<span class="glyphicon glyphicon-eye-open"></span>
<a href='/explorerjs.html?sourcemode="api"&type="uid"&srcparams=%(luid)i'> Your Map </a>
</li>
<li>
<span class="glyphicon glyphicon glyphicon-stats"></span>
<a href='/print_scholar_directory.php?query=%(luid)i'> Your Neighbor Directory and Stats </a>
</li>
</ul>
""" % {'luid': luid })
# /services/job/
@app.route(config['PREFIX'] + '/job/<string:provided_job_id>', methods=['GET'])
def seejob(provided_job_id):
err_msg = ''
if provided_job_id:
jobs = dbcrud.get_jobs(job_id = provided_job_id)
mlog("DEBUG", "got request with provided_job_id", provided_job_id,
"dbcrud retrieved jobs:", jobs)
if len(jobs) == 1:
return render_template("job_ad.html", existing_jobinfo=dumps(tools.prejsonize(jobs[0])))
else:
err_msg = 'No matching jobs for id=%s.' % provided_job_id
else:
err_msg = 'You need to provide a job id to consult the job.'
return render_template(
"message.html",
message = """
We couldn't find the corresponding job.<br>
%s <br>
Please refer to <a href="/services/jobboard/"> the job-market </a> for a complete list of available jobs.
""" % err_msg
)
# /services/addjob/
@app.route(config['PREFIX'] + '/addjob/', methods=['GET','POST'])
@fresh_login_required
def addjob():
# debug
# mlog("DEBUG", "register route: ", config['PREFIX'] + config['USR_ROUTE'] + '/register')
# show form
if request.method == 'GET':
return render_template("job_ad.html", existing_jobinfo=False)
# save form
elif request.method == 'POST':
clean_records = read_record_from_request(request, JOB_FIELDS)
# exemple clean_records
# {'uid': '4206', 'job_valid_date': '2017/09/30', 'mission_text': 'In the town where I was born Lived a man who sailed the sea', 'email': 'romain.loth@truc.org', 'recruiter_org_text': 'We all live in a yellow submarine'}
mlog("DEBUG", 'job record contents:', clean_records)
jobid = dbcrud.save_job( clean_records )
# save associated keywords
kwids = dbcrud.get_or_create_tokitems(clean_records['keywords'])
dbcrud.save_pairs_fkey_tok(
[(jobid, kwid) for kwid in kwids],
map_table = "job_kw"
)
return render_template(
"message.html",
message = """
Your job ad was successfully recorded. You can now find it in your <a href="/services/user/myjobs"> job-board section </a>
"""
)
# /services/jobboard/
@app.route(config['PREFIX'] + '/jobboard/', methods=['GET'])
def jobboard():
# array of dicts
all_jobs = dbcrud.get_jobs()
# ({'email': 'romain.loth@truc.org',
# 'job_valid_date': datetime.date(2017, 9, 30),
# 'jobid': 1,
# 'last_modified': datetime.datetime(2017, 7, 13, 7, 55, 24),
# 'mission_text': 'In the town where I was born \nLived a man who sailed the sea',
# 'recruiter_org_text': 'We all live in a yellow submarine',
# 'uid': 4206},
# {'email': 'romain.loth@iscpif.fr',
# 'job_valid_date': datetime.date(2018, 9, 14),
# 'jobid': 2,
# 'last_modified': datetime.datetime(2017, 7, 13, 8, 2, 31),
# 'mission_text': 'Job zwei',
# 'recruiter_org_text': 'oo',
# 'uid': 4221})
json_rows = dumps([tools.prejsonize(job) for job in all_jobs])
return render_template(
"job_board.html",
message = """
You'll find here all the currently available jobs on our server.
""",
jobs_table = json_rows,
can_edit = 0
)
# /services/user/myjobs/
@app.route(config['PREFIX'] + config['USR_ROUTE'] + '/myjobs/', methods=['GET'])
@fresh_login_required
def myjobs():
# jobs filtered by uid
all_jobs = dbcrud.get_jobs(current_user.uid)
json_rows = dumps([tools.prejsonize(job) for job in all_jobs])
return render_template(
"job_board.html",
message = """
This is the list of jobs you entered. You are the admin of this list and can add, edit or remove items.
""",
jobs_table = json_rows,
can_edit = 1
)
# /services/api/jobs
@fresh_login_required
@app.route(config['PREFIX'] + config['API_ROUTE'] + '/jobs/', methods=['POST', 'DELETE'])
def api_job():
# check if login ok
if not hasattr(current_user, 'uid'):
return unauthorized()
# testing if cookie user is the same as payload data user
if request.method == 'POST':
jobid = request.form.get('jobid')
job_author_uid = request.form.get('uid')
elif request.method == 'DELETE':
jobid = request.args.get('jobid')
job_author_uid = request.args.get('author')
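# Illustrative client calls (hypothetical ids), assuming the caller holds a
# valid fresh-login session cookie; paths follow the "/services/api/jobs" comment above:
#   POST   /services/api/jobs/                      form fields: jobid, uid + JOB_FIELDS
#   DELETE /services/api/jobs/?jobid=12&author=4206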
# proceed...
if (jobid and (str(job_author_uid) == str(current_user.uid))):
# ...updating an existing job
if request.method == 'POST':
mlog("DEBUG",
'received api POST job:', jobid, job_author_uid)
if 'jobid' in request.form:
new_data = read_record_from_request(request, JOB_FIELDS)
try:
# compare with previous pdf
# and remove from filesystem if changed
if 'pdf_fname' in new_data:
old_pdf_fname = dbcrud.find_jobs_pdf(jobid)
if (old_pdf_fname and new_data['pdf_fname'] != old_pdf_fname):
remove(path.join(
*tools.BLOB_SAVING_POINT,
old_pdf_fname
))
# update job
dbcrud.save_job(new_data, jobid)
# update associated keywords
dbcrud.delete_pairs_fkey_tok(jobid, map_table = "job_kw")
kwids = dbcrud.get_or_create_tokitems(new_data['keywords'])
dbcrud.save_pairs_fkey_tok(
[(jobid, kwid) for kwid in kwids],
map_table = "job_kw"
)
except Exception as dberr:
return Response(
response=dumps({'error': tools.format_err(dberr)}),
status=500,
mimetype="application/json")
return Response(
response=dumps({'updated': jobid}),
status=200,
mimetype="application/json")
# ...or just deleting it
elif request.method == 'DELETE':
mlog("DEBUG",
'received api delete job:', jobid, job_author_uid)
try:
# remove pdf if any
old_pdf_fname = dbcrud.find_jobs_pdf(jobid)
if old_pdf_fname:
remove(path.join(*tools.BLOB_SAVING_POINT, old_pdf_fname))
# remove job itself
deleted_jobid = dbcrud.delete_job(jobid, job_author_uid)
except Exception as dberr:
return Response(
response=dumps({'error': tools.format_err(dberr)}),
status=500,
mimetype="application/json")
return Response(
response=dumps({'deleted': deleted_jobid}),
status=200,
mimetype="application/json")
# if user id doesn't match, fake message just in case
else:
return Response(
response=dumps({'error': 'the provided arguments did not match any job resource'}),
status=400,
mimetype="application/json")
# ==============================================================================
# any static pages with topbar are set in /about prefix
# /about/privacy
@app.route('/about/privacy')
def show_privacy():
return render_template("privacy.html")
# /about/tips
@app.route('/about/tips')
def show_tips():
return render_template("tips.html")
########### SUBS ###########
def parse_affiliation_records(clean_records):
"""
Transform GUI side input data into at most 2 orgs objects for DB
In general:
1) the front-end inputs are less free than the DB structure
(the DB could save an array of orgids but the inputs only allow at most 2 at the moment: lab and inst)
2) each org has its microstructure:
- name, acronym, class, location (base properties)
- inst_type (specific to institutions)
- lab_code, url, contact <= not fillable in GUI yet
3) between themselves 2 orgs can have org_org relationships
TODO LATER (not a priority)
4) we want at least one of lab_label or inst_label to be NOT NULL
Choices:
- values are already sanitized by read_record_from_request
- We call label the concatenated name + acronym information,
handling here the possibilities for the input via str analysis
(just short name, just long name, both)
- We return a map with 2 key/value submaps for lab and institutions
"""
new_orgs = {'lab': None, 'inst': None}
for org_class in new_orgs:
# can't create org without some kind of label
if (org_class+"_label" not in clean_records
or not len(clean_records[org_class+"_label"])):
pass
else:
# submap
new_org_info = {}
# 1) label analysis
clean_input = clean_records[org_class+"_label"]
# label split attempt
test_two_groups = match(
r'([^\(]+)(?: *\(([^\)]{1,30})\))?',
clean_input
)
if (test_two_groups
and test_two_groups.groups()[0]
and test_two_groups.groups()[1]):
# ex 'Centre National de la Recherche Scientifique (CNRS)'
# vvvvvvvvvvvvvvvv vvvv
# name acro
name_candidate = test_two_groups.groups()[0]
acro_candidate = test_two_groups.groups()[1]
new_org_info['name'] = name_candidate.strip()
new_org_info['acro'] = acro_candidate.strip()
mlog("DEBUG", "parse_affiliation_records found name='%s' and acro='%s'" % (new_org_info['name'], new_org_info['acro']))
else:
len_input = len(clean_input)
test_uppercase = sub(r'[^0-9A-ZÉ\.]', '', clean_input)
uppercase_rate = len(test_uppercase) / len_input
# special case short and mostly uppercase => just acro
# POSS tune len and uppercase_rate
if (len_input <= 8 or
(len_input <= 20 and uppercase_rate > .7)):
# ex 'CNRS'
# vvvv
# acro
new_org_info['acro'] = clean_input
# normal fallback case => just name
else:
# ex 'Centre National de la Recherche Scientifique' None
# vvvvvvvvvvvvvvvv vvvv
# name acro
new_org_info['name'] = clean_input
# 2) enrich with any other optional org info
for detail_col in ['inst_type', 'lab_code', 'locname',
'url', 'contact_email', 'contact_name']:
if detail_col not in ['inst_type', 'lab_code']:
# this is a convention in our templates
org_detail = org_class + '_' + detail_col
else:
org_detail = detail_col
if org_detail in clean_records:
val = clean_records[org_detail]
if len(val):
new_org_info[detail_col] = val
# 3) keep
new_orgs[org_class] = new_org_info
return new_orgs
def save_form(clean_records, update_flag=False, previous_user_info=None):
"""
wrapper function for save profile/register (all DB-related form actions)
@args :
*clean_records* a dict of sanitized form fields
optional (together):
update_flag we update in DB instead of INSERT
previous_user_info iff update_flag, like current_user.info
"""
# A) a new DB connection
reg_db = dbcrud.connect_db(config)
# B1) re-group the org fields into at most 2 org 'objects'
declared_orgs = parse_affiliation_records(clean_records)
mlog('DEBUG', 'save_form: declared values for org =', declared_orgs)
# B2) for each optional declared org,
# read/fill the orgs table to get associated id(s) in DB
orgids = []
for oclass in ['lab', 'inst']:
if (declared_orgs[oclass]):
orgids.append(
dbcrud.get_or_create_org(declared_orgs[oclass], oclass, reg_db)
)
mlog('DEBUG', 'save_form: found ids for orgs =', orgids)
# B3) save the org <=> org mappings TODO LATER (not a priority)
# dbcrud.record_org_org_link(src_orgid, tgt_orgid, reg_db)
# C) create/update record into the primary user table
# ----------------------------------------------------
# TODO class User method !!
luid = None
if update_flag:
luid = int(previous_user_info['luid'])
sent_luid = int(clean_records['luid'])
if luid != sent_luid:
mlog("WARNING", "User %i attempted to modify the data of another user (%i)!... Aborting update" % (luid, sent_luid))
return None
else:
dbcrud.save_full_scholar(clean_records, reg_db, update_user=previous_user_info)
# remove previous image from filesystem if changed
if (previous_user_info['pic_fname']
and ('pic_fname' in clean_records
and
previous_user_info['pic_fname'] != clean_records['pic_fname'])):
remove(path.join(*tools.IMAGE_SAVING_POINT, previous_user_info['pic_fname']))
else:
luid = int(dbcrud.save_full_scholar(clean_records, reg_db))
# D) read/fill each keyword and save the (uid <=> kwid) pairings
# read/fill each hashtag and save the (uid <=> htid) pairings
for intable in ['keywords', 'hashtags']:
tok_field = intable
if tok_field in clean_records:
tok_table = tok_field
map_table = "sch_" + ('kw' if intable == 'keywords' else 'ht')
tokids = dbcrud.get_or_create_tokitems(
clean_records[tok_field],
reg_db,
tok_table
)
# TODO class User method !!
# POSS selective delete ?
if update_flag:
dbcrud.delete_pairs_fkey_tok(luid, reg_db, map_table)
dbcrud.save_pairs_fkey_tok(
[(luid, tokid) for tokid in tokids],
reg_db,
map_table
)
# E) overwrite the (uid <=> orgid) mapping(s)
dbcrud.rm_sch_org_links(luid, reg_db)
mlog("DEBUG", "removing all orgs for", luid)
for orgid in orgids:
mlog("DEBUG", "recording orgs:", luid, orgid)
dbcrud.record_sch_org_link(luid, orgid, reg_db)
# F) end connection
reg_db.close()
return luid
def read_record_from_request(request, optional_fields = None):
"""
Runs all request-related form actions
Arg:
a flask request
werkzeug.pocoo.org/docs/0.11/wrappers/#werkzeug.wrappers.Request
Process:
input SOURCE_FIELDS data ~> normalized {COLS:values}
Custom made for comex registration forms
- request.form fields: sanitization + string normalization as needed
- request.files blob: save to fs + keep ref in filename col
"""
# init var
clean_records = {}
# sources: request.form and request.files
if (optional_fields):
fields = optional_fields
else:
fields = SOURCE_FIELDS
# we should have all the mandatory fields (checked in client-side js)
# POSS recheck b/c if post comes from elsewhere
for field_info in fields:
field = field_info[0]
do_sanitize = field_info[1]
spec_type = field_info[2]
if field in request.form:
if do_sanitize:
val = sanitize(request.form[field], spec_type)
if val != '':
clean_records[field] = val
else:
# mysql will want None instead of ''
val = None
# any other fields that don't need sanitization (ex: menu options)
else:
clean_records[field] = request.form[field]
# these ones have a blob treatment: saved + replaced by filename
elif (hasattr(request, "files")
and field in request.files
and request.files[field] # <= to test if file not empty
and spec_type == "sblob"):
# read 2 additional attributes: fileext and tgt dbfield
file_type = field_info[3]
ref_fieldname = field_info[4]
new_filename = tools.save_blob_and_get_filename(
request.files[field],
file_type
)
clean_records[ref_fieldname] = new_filename
mlog("DEBUG", "new blob with fname", new_filename)
# special treatment for "other" subquestions
if 'inst_type' in clean_records:
if clean_records['inst_type'] == 'other' and 'other_inst_type' in clean_records:
clean_records['inst_type'] = clean_records['other_inst_type']
# splits for kw_array and ht_array
for tok_field in ['keywords', 'hashtags']:
if tok_field in clean_records:
mlog("DEBUG",
"in clean_records, found a field to tokenize: %s" % tok_field)
temp_array = []
for tok in clean_records[tok_field].split(','):
tok = sanitize(tok)
if tok != '':
temp_array.append(tok)
# replace str by array
clean_records[tok_field] = temp_array
return clean_records
########### MAIN ###########
# this can only be used for debug
# (in general use comex-run.sh to run the app)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8989)
|
moma/comex2
|
services/main.py
|
Python
|
agpl-3.0
| 52,289
|
[
"VisIt"
] |
a26bbceb1012fa3331a92f508193dd7b6e660078e7fcc53572c72d1d0ce42f3e
|
#A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* Copyright (c) Schrodinger, LLC.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
if __name__=='pymol.wizarding':
import pymol
import imp
import sys
import string
import cmd
from cmd import _cmd,lock,unlock,Shortcut,QuietException,_raising, \
_feedback,fb_module,fb_mask, \
DEFAULT_ERROR, DEFAULT_SUCCESS, _raising, is_ok, is_error
import cPickle
import traceback
def _wizard(name,arg,kwd,replace,_self=cmd):
r = DEFAULT_ERROR
import wizard
try:
full_name = 'pymol.wizard.'+name
if not sys.modules.has_key(full_name):
mod_tup = imp.find_module(name,wizard.__path__)
mod_obj = imp.load_module(full_name,mod_tup[0],
mod_tup[1],mod_tup[2])
else:
mod_obj = sys.modules[full_name]
if mod_obj:
oname = string.capitalize(name)
r = DEFAULT_SUCCESS
if hasattr(mod_obj,oname):
kwd['_self']=_self
wiz = apply(getattr(mod_obj,oname),arg,kwd)
if wiz:
_self.set_wizard(wiz,replace)
_self.do("_ refresh_wizard")
else:
print "Error: Sorry, couldn't find the '"+oname+"' class."
else:
print "Error: Sorry, couldn't import the '"+name+"' wizard."
except ImportError:
print "Error: Sorry, couldn't import the '"+name+"' wizard."
return r
def wizard(name=None,*arg,**kwd):
'''
DESCRIPTION
"wizard" launches on of the built-in wizards. There are special
Python scripts which work with PyMOL in order to obtain direct user
interaction and easily peform complicated tasks.
USAGE
wizard name
PYMOL API
cmd.wizard(string name)
EXAMPLE
wizard distance # launches the distance measurement wizard
'''
_self = kwd.get('_self',cmd)
r = DEFAULT_ERROR
if name==None:
_self.set_wizard()
r = DEFAULT_SUCCESS
else:
name = str(name)
if string.lower(name)=='distance': # legacy compatibility
name = 'measurement'
r = _wizard(name,arg,kwd,0,_self=_self)
if _self._raising(r,_self): raise pymol.CmdException
return r
def replace_wizard(name=None,*arg,**kwd):
'''
DESCRIPTION
"replace_wizard" is an unsupported internal command.
'''
_self = kwd.get('_self',cmd)
r = DEFAULT_ERROR
if name==None:
_self.set_wizard()
r = DEFAULT_SUCCESS
else:
r = _wizard(name,arg,kwd,1,_self=_self)
if _self._raising(r,_self): raise pymol.CmdException
return r
def set_wizard(wizard=None,replace=0,_self=cmd): # INTERNAL
r = DEFAULT_ERROR
try:
_self.lock(_self)
r = _cmd.set_wizard(_self._COb,wizard,replace)
finally:
_self.unlock(r,_self)
if _self._raising(r,_self): raise pymol.CmdException
return r
def set_wizard_stack(stack=[],_self=cmd): # INTERNAL
r = DEFAULT_ERROR
try:
_self.lock(_self)
r = _cmd.set_wizard_stack(_self._COb,stack)
finally:
_self.unlock(r,_self)
if _self._raising(r,_self): raise pymol.CmdException
return r
def refresh_wizard(_self=cmd): # INTERNAL
'''
DESCRIPTION
"refresh_wizard" is in unsupported internal command.
'''
r = DEFAULT_ERROR
try:
_self.lock(_self)
r = _cmd.refresh_wizard(_self._COb)
finally:
_self.unlock(r,_self)
if _self._raising(r,_self): raise pymol.CmdException
return r
def dirty_wizard(_self=cmd): # INTERNAL
r = DEFAULT_ERROR
try:
_self.lock(_self)
r = _cmd.dirty_wizard(_self._COb)
finally:
_self.unlock(r,_self)
if _self._raising(r,_self): raise pymol.CmdException
return r
def get_wizard(_self=cmd): # INTERNAL
r = DEFAULT_ERROR
try:
_self.lock(_self)
r = _cmd.get_wizard(_self._COb)
finally:
_self.unlock(r,_self)
if _self._raising(r,_self): raise pymol.CmdException
return r
def get_wizard_stack(_self=cmd): # INTERNAL
r = DEFAULT_ERROR
try:
_self.lock(_self)
r = _cmd.get_wizard_stack(_self._COb)
finally:
_self.unlock(r,_self)
if _self._raising(r,_self): raise pymol.CmdException
return r
def session_save_wizard(session,_self=cmd):
# double-pickle so that session file is class-independent
stack = cmd.get_wizard_stack(_self=_self)
session['wizard']=cPickle.dumps(stack,1)
return 1
def session_restore_wizard(session,_self=cmd):
if session!=None:
if session.has_key('wizard'):
try:
wizards = cPickle.loads(session['wizard'])
for wiz in wizards:
wiz.cmd = _self
_self.set_wizard_stack(wizards,_self=_self)
except:
print "Session-Warning: unable to restore wizard."
return 1
|
gratefulfrog/lib
|
python/pymol/wizarding.py
|
Python
|
gpl-2.0
| 6,104
|
[
"PyMOL"
] |
067207b8ba948ace4af8b9ab48a160ef9f14b9a98bb317554b77d750d291a07a
|
"""Notifications for Android TV notification service."""
import base64
import io
import logging
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONF_HOST, CONF_TIMEOUT, HTTP_OK, PERCENTAGE
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_DURATION = "duration"
CONF_FONTSIZE = "fontsize"
CONF_POSITION = "position"
CONF_TRANSPARENCY = "transparency"
CONF_COLOR = "color"
CONF_INTERRUPT = "interrupt"
DEFAULT_DURATION = 5
DEFAULT_FONTSIZE = "medium"
DEFAULT_POSITION = "bottom-right"
DEFAULT_TRANSPARENCY = "default"
DEFAULT_COLOR = "grey"
DEFAULT_INTERRUPT = False
DEFAULT_TIMEOUT = 5
DEFAULT_ICON = (
"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR4nGP6zwAAAgcBApo"
"cMXEAAAAASUVORK5CYII="
)
ATTR_DURATION = "duration"
ATTR_FONTSIZE = "fontsize"
ATTR_POSITION = "position"
ATTR_TRANSPARENCY = "transparency"
ATTR_COLOR = "color"
ATTR_BKGCOLOR = "bkgcolor"
ATTR_INTERRUPT = "interrupt"
ATTR_IMAGE = "filename2"
ATTR_FILE = "file"
# Attributes contained in file
ATTR_FILE_URL = "url"
ATTR_FILE_PATH = "path"
ATTR_FILE_USERNAME = "username"
ATTR_FILE_PASSWORD = "password"
ATTR_FILE_AUTH = "auth"
# Any other value or absence of 'auth' leads to basic authentication being used
ATTR_FILE_AUTH_DIGEST = "digest"
FONTSIZES = {"small": 1, "medium": 0, "large": 2, "max": 3}
POSITIONS = {
"bottom-right": 0,
"bottom-left": 1,
"top-right": 2,
"top-left": 3,
"center": 4,
}
TRANSPARENCIES = {
"default": 0,
f"0{PERCENTAGE}": 1,
f"25{PERCENTAGE}": 2,
f"50{PERCENTAGE}": 3,
f"75{PERCENTAGE}": 4,
f"100{PERCENTAGE}": 5,
}
COLORS = {
"grey": "#607d8b",
"black": "#000000",
"indigo": "#303F9F",
"green": "#4CAF50",
"red": "#F44336",
"cyan": "#00BCD4",
"teal": "#009688",
"amber": "#FFC107",
"pink": "#E91E63",
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_DURATION, default=DEFAULT_DURATION): vol.Coerce(int),
vol.Optional(CONF_FONTSIZE, default=DEFAULT_FONTSIZE): vol.In(FONTSIZES.keys()),
vol.Optional(CONF_POSITION, default=DEFAULT_POSITION): vol.In(POSITIONS.keys()),
vol.Optional(CONF_TRANSPARENCY, default=DEFAULT_TRANSPARENCY): vol.In(
TRANSPARENCIES.keys()
),
vol.Optional(CONF_COLOR, default=DEFAULT_COLOR): vol.In(COLORS.keys()),
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.Coerce(int),
vol.Optional(CONF_INTERRUPT, default=DEFAULT_INTERRUPT): cv.boolean,
}
)
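# Illustrative configuration.yaml entry for this notify platform (hypothetical
# host; the platform key "nfandroidtv" is inferred from the component path).
# All optional keys show their schema defaults:
#
# notify:
#   - platform: nfandroidtv
#     host: 192.168.1.50
#     duration: 5
#     fontsize: medium
#     position: bottom-right
#     transparency: default
#     color: grey
#     interrupt: false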
def get_service(hass, config, discovery_info=None):
"""Get the Notifications for Android TV notification service."""
remoteip = config.get(CONF_HOST)
duration = config.get(CONF_DURATION)
fontsize = config.get(CONF_FONTSIZE)
position = config.get(CONF_POSITION)
transparency = config.get(CONF_TRANSPARENCY)
color = config.get(CONF_COLOR)
interrupt = config.get(CONF_INTERRUPT)
timeout = config.get(CONF_TIMEOUT)
return NFAndroidTVNotificationService(
remoteip,
duration,
fontsize,
position,
transparency,
color,
interrupt,
timeout,
hass.config.is_allowed_path,
)
class NFAndroidTVNotificationService(BaseNotificationService):
"""Notification service for Notifications for Android TV."""
def __init__(
self,
remoteip,
duration,
fontsize,
position,
transparency,
color,
interrupt,
timeout,
is_allowed_path,
):
"""Initialize the service."""
self._target = f"http://{remoteip}:7676"
self._default_duration = duration
self._default_fontsize = fontsize
self._default_position = position
self._default_transparency = transparency
self._default_color = color
self._default_interrupt = interrupt
self._timeout = timeout
self._icon_file = io.BytesIO(base64.b64decode(DEFAULT_ICON))
self.is_allowed_path = is_allowed_path
def send_message(self, message="", **kwargs):
"""Send a message to a Android TV device."""
_LOGGER.debug("Sending notification to: %s", self._target)
payload = {
"filename": (
"icon.png",
self._icon_file,
"application/octet-stream",
{"Expires": "0"},
),
"type": "0",
"title": kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT),
"msg": message,
"duration": "%i" % self._default_duration,
"fontsize": "%i" % FONTSIZES.get(self._default_fontsize),
"position": "%i" % POSITIONS.get(self._default_position),
"bkgcolor": "%s" % COLORS.get(self._default_color),
"transparency": "%i" % TRANSPARENCIES.get(self._default_transparency),
"offset": "0",
"app": ATTR_TITLE_DEFAULT,
"force": "true",
"interrupt": "%i" % self._default_interrupt,
}
data = kwargs.get(ATTR_DATA)
if data:
if ATTR_DURATION in data:
duration = data.get(ATTR_DURATION)
try:
payload[ATTR_DURATION] = "%i" % int(duration)
except ValueError:
_LOGGER.warning("Invalid duration-value: %s", str(duration))
if ATTR_FONTSIZE in data:
fontsize = data.get(ATTR_FONTSIZE)
if fontsize in FONTSIZES:
payload[ATTR_FONTSIZE] = "%i" % FONTSIZES.get(fontsize)
else:
_LOGGER.warning("Invalid fontsize-value: %s", str(fontsize))
if ATTR_POSITION in data:
position = data.get(ATTR_POSITION)
if position in POSITIONS:
payload[ATTR_POSITION] = "%i" % POSITIONS.get(position)
else:
_LOGGER.warning("Invalid position-value: %s", str(position))
if ATTR_TRANSPARENCY in data:
transparency = data.get(ATTR_TRANSPARENCY)
if transparency in TRANSPARENCIES:
payload[ATTR_TRANSPARENCY] = "%i" % TRANSPARENCIES.get(transparency)
else:
_LOGGER.warning("Invalid transparency-value: %s", str(transparency))
if ATTR_COLOR in data:
color = data.get(ATTR_COLOR)
if color in COLORS:
payload[ATTR_BKGCOLOR] = "%s" % COLORS.get(color)
else:
_LOGGER.warning("Invalid color-value: %s", str(color))
if ATTR_INTERRUPT in data:
interrupt = data.get(ATTR_INTERRUPT)
try:
payload[ATTR_INTERRUPT] = "%i" % cv.boolean(interrupt)
except vol.Invalid:
_LOGGER.warning("Invalid interrupt-value: %s", str(interrupt))
filedata = data.get(ATTR_FILE) if data else None
if filedata is not None:
# Load from file or URL
file_as_bytes = self.load_file(
url=filedata.get(ATTR_FILE_URL),
local_path=filedata.get(ATTR_FILE_PATH),
username=filedata.get(ATTR_FILE_USERNAME),
password=filedata.get(ATTR_FILE_PASSWORD),
auth=filedata.get(ATTR_FILE_AUTH),
)
if file_as_bytes:
payload[ATTR_IMAGE] = (
"image",
file_as_bytes,
"application/octet-stream",
{"Expires": "0"},
)
try:
_LOGGER.debug("Payload: %s", str(payload))
response = requests.post(self._target, files=payload, timeout=self._timeout)
if response.status_code != HTTP_OK:
_LOGGER.error("Error sending message: %s", str(response))
except requests.exceptions.ConnectionError as err:
_LOGGER.error("Error communicating with %s: %s", self._target, str(err))
def load_file(
self, url=None, local_path=None, username=None, password=None, auth=None
):
"""Load image/document/etc from a local path or URL."""
try:
if url is not None:
# Check whether authentication parameters are provided
if username is not None and password is not None:
# Use digest or basic authentication
if ATTR_FILE_AUTH_DIGEST == auth:
auth_ = HTTPDigestAuth(username, password)
else:
auth_ = HTTPBasicAuth(username, password)
# Load file from URL with authentication
req = requests.get(url, auth=auth_, timeout=DEFAULT_TIMEOUT)
else:
# Load file from URL without authentication
req = requests.get(url, timeout=DEFAULT_TIMEOUT)
return req.content
if local_path is not None:
# Check whether path is whitelisted in configuration.yaml
if self.is_allowed_path(local_path):
return open(local_path, "rb") # pylint: disable=consider-using-with
_LOGGER.warning("'%s' is not secure to load data from!", local_path)
else:
_LOGGER.warning("Neither URL nor local path found in params!")
except OSError as error:
_LOGGER.error("Can't load from url or local path: %s", error)
return None
|
kennedyshead/home-assistant
|
homeassistant/components/nfandroidtv/notify.py
|
Python
|
apache-2.0
| 9,929
|
[
"Amber"
] |
71211b4f765c02b38d06c6a54346f4f022f648dab82664dda33fccc249e18ee0
|
#!/usr/bin/env python
"""Tests of code for OTU picking"""
__author__ = "Kyle Bittinger, Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
# remember to add yourself if you make changes
__credits__ = [
"Kyle Bittinger",
"Greg Caporaso",
"Rob Knight",
"Jens Reeder",
"William Walters",
"Jose Carlos Clemente Litran"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from os import remove, close
from os.path import abspath, join, exists, split
from shutil import rmtree
from tempfile import mkstemp, mkdtemp
from filecmp import cmp
from unittest import TestCase, main
from numpy.testing import assert_almost_equal
from skbio.sequence import DNA
from skbio.util import create_dir, remove_files
from bfillings.formatdb import build_blast_db_from_fasta_path
from bfillings.sortmerna_v2 import build_database_sortmerna
from qiime.util import get_qiime_temp_dir
from qiime.parse import fields_to_dict
from qiime.pick_otus import (CdHitOtuPicker, OtuPicker,
MothurOtuPicker, PrefixSuffixOtuPicker, TrieOtuPicker, BlastOtuPicker,
expand_otu_map_seq_ids, map_otu_map_files, UclustOtuPicker,
UclustReferenceOtuPicker, expand_failures, UsearchOtuPicker,
UsearchReferenceOtuPicker, get_blast_hits, BlastxOtuPicker,
Usearch610DeNovoOtuPicker, Usearch61ReferenceOtuPicker,
SumaClustOtuPicker, SortmernaV2OtuPicker, SwarmOtuPicker)
class OtuPickerTests(TestCase):
"""Tests of the abstract OtuPicker class"""
def test_init(self):
"""Abstract OtuPicker __init__ should store name, params"""
p = OtuPicker({})
self.assertEqual(p.Name, 'OtuPicker')
self.assertEqual(p.Params, {})
def test_call(self):
"""Abstract OtuPicker __call__ should raise NotImplementedError"""
p = OtuPicker({})
self.assertRaises(NotImplementedError, p, '/path/to/seqs')
def test_prefilter_exact_matches(self):
"""Abstract OtuPicker _prefilter_exact_matches functions as expected
"""
seqs = [('s1 comment1', 'ACCTTGTTACTTT'), # three copies
('s2 comment2', 'ACCTTGTTACTTTC'), # one copy
('s3 comment3', 'ACCTTGTTACTTTCC'), # two copies
('s4 comment4', 'ACCTTGTTACTTT'),
('s5 comment5', 'ACCTTGTTACTTTCC'),
('s6 comment6', 'ACCTTGTTACTTT')]
expected0 = [('QiimeExactMatch.s1', 'ACCTTGTTACTTT'),
('QiimeExactMatch.s2', 'ACCTTGTTACTTTC'),
('QiimeExactMatch.s3', 'ACCTTGTTACTTTCC')]
expected1 = {'QiimeExactMatch.s1': ['s1', 's4', 's6'],
'QiimeExactMatch.s2': ['s2'],
'QiimeExactMatch.s3': ['s3', 's5']}
expected = (expected0, expected1)
p = OtuPicker({})
actual = p._prefilter_exact_matches(seqs)
self.assertEqual(actual, expected)
class SortmernaV2OtuPickerTests(TestCase):
""" Tests for SortMeRNA (closed-reference) OTU picker """
def setUp(self):
self.output_dir = mkdtemp()
self.reference_seq_fp = sortmerna_reference_seqs_fp
self.read_seqs_fp = sortmerna_read_seqs_fp
self.otumap_fp = sortmerna_otumap_fp
self.failures_fp = sortmerna_failures_fp
# create temporary file with reference sequences defined
# in reference_seqs_fp
f, self.file_reference_seq_fp = mkstemp(prefix='temp_references_',
suffix='.fasta')
close(f)
# write _reference_ sequences to tmp file
with open(self.file_reference_seq_fp, 'w') as tmp:
tmp.write(self.reference_seq_fp)
# create temporary file with read sequences defined in read_seqs_fp
f, self.file_read_seqs_fp = mkstemp(prefix='temp_reads_',
suffix='.fasta')
close(f)
# write _read_ sequences to tmp file
with open(self.file_read_seqs_fp, 'w') as tmp:
tmp.write(self.read_seqs_fp)
# create temporary file with the OTU map (97% id)
f, self.file_otumap_fp = mkstemp(prefix='temp_otumap_',
suffix='.txt')
close(f)
# write expected OTU map to tmp file
with open(self.file_otumap_fp, 'w') as tmp:
tmp.write(self.otumap_fp)
# create a temporary file with failures
f, self.file_failures_fp = mkstemp(prefix='temp_failures_',
suffix='.txt')
with open(self.file_failures_fp, 'w') as tmp:
tmp.write(self.failures_fp)
self.result_path = '%s/%s_otus.txt' % (self.output_dir, 'temp_reads')
self.log_path = '%s/%s_otus.log' % (self.output_dir, 'temp_reads')
self.failure_path = '%s/%s_failures.txt' % (
self.output_dir, 'temp_reads')
# list of files to remove
self.files_to_remove = [self.file_reference_seq_fp,
self.file_read_seqs_fp,
self.file_otumap_fp,
self.file_failures_fp,
self.result_path,
self.log_path,
self.failure_path]
def tearDown(self):
remove_files(self.files_to_remove)
rmtree(self.output_dir)
def check_output(self,
clusters=None):
""" common function used to validate SortMeRNA's
output files for each test
"""
# clusters should be empty (written to file)
self.assertTrue(clusters is None)
# clusters OTU map exists
self.assertTrue(exists(self.result_path))
# failures output file exists
self.assertTrue(exists(self.failure_path))
# expected failures file matches expected
self.assertTrue(cmp(self.file_failures_fp, self.failure_path))
# log exists
self.assertTrue(exists(self.log_path))
def test_call_default_params_db_not_indexed(self):
""" clusters seqs within 97% identity with default parameters,
non-indexed database passed
"""
app = SortmernaV2OtuPicker(
params={'max_e_value': 1,
'similarity': 0.97,
'coverage': 0.97,
'threads': 1,
'blast': False,
'best': 1,
'max_pos': 250,
'prefilter_identical_sequences': True,
'otu_id_prefix': 'RefOTU'})
clusters = app(
seq_path=self.file_read_seqs_fp,
result_path=self.result_path,
log_path=self.log_path,
sortmerna_db=None,
refseqs_fp=self.file_reference_seq_fp,
failure_path=self.failure_path)
self.check_output(clusters)
# clusters OTU map is correct
self.assertTrue(cmp(self.file_otumap_fp, self.result_path))
def test_call_default_params_db_indexed(self):
""" clusters seqs within 97% identity with default parameters,
indexed database passed
"""
# rebuild the index
sortmerna_db, db_files_to_remove = build_database_sortmerna(
abspath(self.file_reference_seq_fp),
max_pos=250,
output_dir=self.output_dir)
# Files created by indexdb_rna to be deleted
self.files_to_remove.extend(db_files_to_remove)
app = SortmernaV2OtuPicker(
params={'max_e_value': 1,
'similarity': 0.97,
'coverage': 0.97,
'threads': 1,
'blast': False,
'best': 1,
'max_pos': 250,
'prefilter_identical_sequences': True,
'otu_id_prefix': 'RefOTU'})
clusters = app(
seq_path=self.file_read_seqs_fp,
result_path=self.result_path,
log_path=self.log_path,
sortmerna_db=sortmerna_db,
refseqs_fp=self.file_reference_seq_fp,
failure_path=self.failure_path)
self.check_output(clusters)
# clusters OTU map is correct
self.assertTrue(cmp(self.file_otumap_fp, self.result_path))
def test_call_no_dereplication(self):
""" clusters seqs within 97% identity with default parameters,
do not dereplicate the reads prior to alignment
"""
app = SortmernaV2OtuPicker(
params={'max_e_value': 1,
'similarity': 0.97,
'coverage': 0.97,
'threads': 1,
'blast': False,
'best': 1,
'max_pos': 250,
'prefilter_identical_sequences': False,
'otu_id_prefix': 'RefOTU'})
clusters = app(
seq_path=self.file_read_seqs_fp,
result_path=self.result_path,
log_path=self.log_path,
sortmerna_db=None,
refseqs_fp=self.file_reference_seq_fp,
failure_path=self.failure_path)
self.check_output(clusters)
# clusters OTU map is correct
self.assertTrue(cmp(self.file_otumap_fp, self.result_path))
class MothurOtuPickerTests(TestCase):
def setUp(self):
fd, self.small_seq_path = mkstemp(prefix='MothurOtuPickerTest_',
suffix='.fasta')
close(fd)
f = open(self.small_seq_path, 'w')
f.write(
'>aaaaaa\nTAGGCTCTGATATAATAGCTCTC---------\n'
'>cccccc\n------------TGACTACGCAT---------\n'
'>bbbbbb\n----TATCGCTTCGACGATTCTCTGATAGAGA\n'
)
f.close()
def tearDown(self):
remove(self.small_seq_path)
def test_call(self):
app = MothurOtuPicker({})
observed_otus = app(self.small_seq_path)
expected_otus = [['cccccc'], ['bbbbbb'], ['aaaaaa']]
assert_almost_equal(observed_otus.keys(),
[0, 1, 2])
self.assertItemsEqual(observed_otus.values(),
expected_otus)
def test_call_low_similarity(self):
app = MothurOtuPicker({'Similarity': 0.35})
observed_otus = app(self.small_seq_path)
expected_otus = [['bbbbbb', 'cccccc'], ['aaaaaa']]
assert_almost_equal(observed_otus.keys(),
[0, 1])
self.assertItemsEqual(observed_otus.values(),
expected_otus)
def test_call_nearest_neighbor(self):
app = MothurOtuPicker({'Algorithm': 'nearest', 'Similarity': 0.35})
observed_otus = app(self.small_seq_path)
expected_otus = [['bbbbbb', 'cccccc'], ['aaaaaa']]
self.assertItemsEqual(observed_otus.keys(),
[0, 1])
self.assertItemsEqual(observed_otus.values(),
expected_otus)
class SumaClustOtuPickerTests(TestCase):
""" Tests of the SumaClust de novo OTU picker """
def setUp(self):
self.output_dir = mkdtemp()
self.read_seqs = sumaclust_reads_seqs
# create temporary file with read sequences defined in read_seqs
f, self.file_read_seqs = mkstemp(prefix='temp_reads_',
suffix='.fasta')
close(f)
# write read sequences to tmp file
with open(self.file_read_seqs, 'w') as tmp:
tmp.write(self.read_seqs)
# create temporary file with final OTU map
f, self.file_otumap = mkstemp(prefix='temp_otumap',
suffix='.txt')
close(f)
self.result_path = '%s/%s_otus.txt' % (self.output_dir, 'temp_reads')
self.log_path = '%s/%s_otus.log' % (self.output_dir, 'temp_reads')
# list of files to remove
self.files_to_remove = [self.file_read_seqs,
self.file_otumap,
self.result_path,
self.log_path]
def tearDown(self):
remove_files(self.files_to_remove)
rmtree(self.output_dir)
def check_clusters(self,
clusters=None):
# Check the OTU map was output with the correct size
self.assertTrue(exists(self.result_path))
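# The OTU map written to result_path is tab-separated: one OTU per line,
# with the OTU id first and then its member sequence ids, e.g. (illustrative):
#   DenovoOTU0<TAB>s1_844<TAB>s1_1886<TAB>...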
# Place actual clusters in a list of lists
actual_clusters = [line.strip().split('\t')[1:]
for line in open(self.result_path, 'U')]
actual_clusters.sort()
# Check the returned clusters list of lists is as expected
expected_clusters = [['s1_844', 's1_1886', 's1_5347', 's1_5737',
's1_7014', 's1_7881', 's1_7040', 's1_6200',
's1_1271', 's1_8615'],
['s1_8977', 's1_10439', 's1_12366', 's1_15985',
's1_21935', 's1_11650', 's1_11001', 's1_8592',
's1_14735', 's1_4677'],
['s1_630', 's1_4572', 's1_5748', 's1_13961',
's1_2369', 's1_3750', 's1_7634', 's1_8623',
's1_8744', 's1_6846']]
expected_clusters.sort()
# Should be 3 clusters
self.assertEqual(len(actual_clusters), 3)
# List of actual clusters matches list of expected clusters
for actual_cluster, expected_cluster in zip(actual_clusters,
expected_clusters):
actual_cluster.sort()
expected_cluster.sort()
self.assertEqual(actual_cluster, expected_cluster)
def test_call_default_params(self):
""" SumaClust should return an OTU map
with content identical to the expected OTU map,
sequences are de-replicated prior to
clustering
"""
app = SumaClustOtuPicker(
params={'similarity': 0.97,
'exact': False,
'threads': 1,
'l': True,
'prefilter_identical_sequences':
True,
'denovo_otu_id_prefix':
'DenovoOTU'})
clusters = app(seq_path=self.file_read_seqs,
result_path=self.result_path,
log_path=self.log_path)
self.check_clusters(clusters)
def test_call_no_dereplication(self):
""" SumaClust should return an OTU map
with content identical to the expected OTU map,
sequences are _not_ de-replicated prior to
clustering
"""
app = SumaClustOtuPicker(
params={'similarity': 0.97,
'exact': False,
'threads': 1,
'l': True,
'prefilter_identical_sequences':
False,
'denovo_otu_id_prefix':
'DenovoOTU'})
clusters = app(seq_path=self.file_read_seqs,
result_path=self.result_path,
log_path=self.log_path)
self.check_clusters(clusters)
def test_call_no_otu_id_prefix(self):
""" SumaClust should return an OTU map
with content identical to the expected OTU map,
resulting clusters do not have an assigned
sumaclust_otu_id_prefix
"""
app = SumaClustOtuPicker(
params={'similarity': 0.97,
'exact': False,
'threads': 1,
'l': True,
'prefilter_identical_sequences':
True,
'denovo_otu_id_prefix': None})
clusters = app(seq_path=self.file_read_seqs,
result_path=self.result_path,
log_path=self.log_path)
self.check_clusters(clusters)
class SwarmOtuPickerTests(TestCase):
""" Tests of the Swarm de novo OTU picker """
def setUp(self):
self.output_dir = mkdtemp()
# use same reads for clustering as for SumaClust
self.read_seqs = sumaclust_reads_seqs
# create temporary file with read sequences defined in read_seqs
f, self.file_read_seqs = mkstemp(prefix='temp_reads_',
suffix='.fasta')
close(f)
# write read sequences to tmp file
with open(self.file_read_seqs, 'w') as tmp:
tmp.write(self.read_seqs)
self.result_path = '%s/%s_otus.txt' % (self.output_dir, 'temp_reads')
self.log_path = '%s/%s_otus.log' % (self.output_dir, 'temp_reads')
# list of files to remove
self.files_to_remove = [self.file_read_seqs,
self.result_path,
self.log_path]
def tearDown(self):
remove_files(self.files_to_remove)
rmtree(self.output_dir)
def check_clusters(self,
otu_map=None):
actual_clusters = list(otu_map.values())
actual_clusters.sort()
# Check the returned clusters list of lists is as expected
expected_clusters = [['s1_844', 's1_1886', 's1_5347', 's1_5737',
's1_7014', 's1_7881', 's1_7040', 's1_6200',
's1_1271', 's1_8615'],
['s1_8977', 's1_10439', 's1_12366', 's1_15985',
's1_21935', 's1_11650', 's1_11001', 's1_8592',
's1_14735', 's1_4677'],
['s1_630', 's1_4572', 's1_5748', 's1_13961',
's1_2369', 's1_3750', 's1_7634', 's1_8623',
's1_8744', 's1_6846']]
expected_clusters.sort()
# Should be 3 clusters
self.assertEqual(len(actual_clusters), 3)
# List of actual clusters matches list of expected clusters
for actual_cluster, expected_cluster in zip(actual_clusters,
expected_clusters):
actual_cluster.sort()
expected_cluster.sort()
self.assertEqual(actual_cluster, expected_cluster)
def test_call_default_params(self):
""" Swarm should return an OTU map
with content identical to the expected OTU map,
sequences are de-replicated prior to
clustering
"""
app = SwarmOtuPicker(
params={'resolution': 1,
'threads': 1,
'prefilter_identical_sequences':
True,
'denovo_otu_id_prefix':
'denovo'})
clusters = app(seq_path=self.file_read_seqs,
result_path=self.result_path,
log_path=self.log_path)
# resulting clusters are written to result_path
self.assertTrue(clusters is None)
otu_map = fields_to_dict(open(self.result_path))
# Check denovo0, denovo1 and denovo2 are the
# cluster names
self.assertTrue('denovo0' in otu_map,
'de novo OTU (denovo0) is not in the final OTU map.')
self.assertTrue('denovo1' in otu_map,
'de novo OTU (denovo1) is not in the final OTU map.')
self.assertTrue('denovo2' in otu_map,
'de novo OTU (denovo2) is not in the final OTU map.')
self.check_clusters(otu_map)
def test_no_otu_id_prefix(self):
""" Swarm should return an OTU map
with content identical to the expected OTU map,
sequences are de-replicated prior to
clustering
"""
app = SwarmOtuPicker(
params={'resolution': 1,
'threads': 1,
'prefilter_identical_sequences':
True,
'denovo_otu_id_prefix': None})
clusters = app(seq_path=self.file_read_seqs,
result_path=self.result_path,
log_path=self.log_path)
# resulting clusters are written to result_path
self.assertTrue(clusters is None)
otu_map = fields_to_dict(open(self.result_path))
# Check 0, 1 and 2 are the
# cluster names
self.assertTrue('0' in otu_map,
'de novo OTU (0) is not in the final OTU map.')
self.assertTrue('1' in otu_map,
'de novo OTU (1) is not in the final OTU map.')
self.assertTrue('2' in otu_map,
'de novo OTU (2) is not in the final OTU map.')
self.check_clusters(otu_map)
class BlastxOtuPickerTests(TestCase):
""" Tests of the blastx-based otu picker """
def setUp(self):
"""
"""
self.otu_picker = BlastxOtuPicker({'max_e_value': 0.001})
self.seqs = [
('s0 some description', 'CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC'),
('s1', 'TGCAGCTTGAGCCACAGGAGAGAGAGAGCTTC'),
('s2', 'TGCAGCTTGAGCCACAGGAGAGAGCCTTC'),
('s3', 'TGCAGCTTGAGCCACAGGAGAGAGAGAGCTTC'),
('s4', 'ACCGATGAGATATTAGCACAGGGGAATTAGAACCA'),
('s5', 'TGTCGAGAGTGAGATGAGATGAGAACA'),
('s6', 'ACGTATTTTAATTTGGCATGGT'),
('s7', 'TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT'),
]
self.ref_seqs_pr = [
('ref1', 'CSLSHRRERA'),
('ref2', 'TDEILAQGN'),
('ref3', 'CRE'),
('ref4', 'TYFNGAW'),
('ref5', 'RATGEREL'),
]
fd, self.seqs_fp = mkstemp(prefix='BlastOtuPickerTest_',
suffix='.fasta')
close(fd)
fd, self.reference_seqs_pr_fp = mkstemp(prefix='BlastOtuPickerTest_',
suffix='.fasta')
close(fd)
f = open(self.seqs_fp, 'w')
f.write('\n'.join(['>%s\n%s' % s for s in self.seqs]))
f.close()
f = open(self.reference_seqs_pr_fp, 'w')
f.write('\n'.join(['>%s\n%s' % s for s in self.ref_seqs_pr]))
f.close()
self.blast_db_pr, self.pr_db_files_to_remove = \
build_blast_db_from_fasta_path(self.reference_seqs_pr_fp,
is_protein=True)
self._files_to_remove = self.pr_db_files_to_remove +\
[self.seqs_fp,
self.reference_seqs_pr_fp]
def tearDown(self):
"""
"""
remove_files(self._files_to_remove, error_on_missing=False)
def test_get_blast_hits_blastx(self):
"""get_blast_hits functions as expected with blastx """
actual = get_blast_hits(
self.seqs,
self.blast_db_pr,
max_e_value=0.01,
min_pct_identity=0.5,
min_aligned_percent=0.5,
blast_program='blastx')
# couple of sanity checks against command line blast
self.assertEqual(len(actual['s3']), 2)
self.assertEqual(actual['s3'][0]['SUBJECT ID'], 'ref1')
self.assertEqual(actual['s3'][1]['SUBJECT ID'], 'ref5')
# increase stringency reduces number of blast hits
actual = get_blast_hits(
self.seqs,
self.blast_db_pr,
max_e_value=0.001,
min_pct_identity=0.5,
min_aligned_percent=0.5,
blast_program='blastx')
# couple of sanity checks against command line blast
self.assertEqual(len(actual['s3']), 1)
self.assertEqual(actual['s3'][0]['SUBJECT ID'], 'ref1')
def test_call(self):
"""BLASTX OTU Picker functions as expected
"""
expected = {'ref1': ['s3', 's2', 's1'],
'ref2': ['s4']}
actual = self.otu_picker(self.seqs_fp,
refseqs_fp=self.reference_seqs_pr_fp)
self.assertEqual(actual, expected)
class BlastOtuPickerTests(TestCase):
""" Tests of the blast-based otu picker """
def setUp(self):
"""
"""
self.otu_picker = BlastOtuPicker({'max_e_value': 1e-3})
self.seqs = [
('s0 some description', 'CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC'),
('s1', 'TGCAGCTTGAGCCACAGGAGAGAGAGAGCTTC'),
('s2', 'TGCAGCTTGAGCCACAGGAGAGAGCCTTC'),
('s3', 'TGCAGCTTGAGCCACAGGAGAGAGAGAGCTTC'),
('s4', 'ACCGATGAGATATTAGCACAGGGGAATTAGAACCA'),
('s5', 'TGTCGAGAGTGAGATGAGATGAGAACA'),
('s6', 'ACGTATTTTAATTTGGCATGGT'),
('s7', 'TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT'),
]
self.ref_seqs = [
('ref1', 'TGCAGCTTGAGCCACAGGAGAGAGAGAGCTTC'),
('ref2', 'ACCGATGAGATATTAGCACAGGGGAATTAGAACCA'),
('ref3', 'TGTCGAGAGTGAGATGAGATGAGAACA'),
('ref4', 'ACGTATTTTAATGGGGCATGGT'),
('ref5', 'AGAGCCACAGGAGAGAGAGAGCTTC'),
]
self.ref_seqs_rc = [
('ref1', str(DNA('TGCAGCTTGAGCCACAGGAGAGAGAGAGCTTC').rc())),
('ref2', str(DNA('ACCGATGAGATATTAGCACAGGGGAATTAGAACCA').rc())),
('ref3', str(DNA('TGTCGAGAGTGAGATGAGATGAGAACA').rc())),
('ref4', str(DNA('ACGTATTTTAATGGGGCATGGT').rc())),
]
fd, self.seqs_fp = mkstemp(prefix='BlastOtuPickerTest_',
suffix='.fasta')
close(fd)
fd, self.reference_seqs_fp = mkstemp(prefix='BlastOtuPickerTest_',
suffix='.fasta')
close(fd)
fd, self.reference_seqs_rc_fp = mkstemp(prefix='BlastOtuPickerTest_',
suffix='.fasta')
close(fd)
f = open(self.seqs_fp, 'w')
f.write('\n'.join(['>%s\n%s' % s for s in self.seqs]))
f.close()
f = open(self.reference_seqs_fp, 'w')
f.write('\n'.join(['>%s\n%s' % s for s in self.ref_seqs]))
f.close()
f = open(self.reference_seqs_rc_fp, 'w')
f.write('\n'.join(['>%s\n%s' % s for s in self.ref_seqs_rc]))
f.close()
self.blast_db, self.db_files_to_remove = \
build_blast_db_from_fasta_path(self.reference_seqs_fp)
self._files_to_remove = self.db_files_to_remove +\
[self.seqs_fp,
self.reference_seqs_fp,
self.reference_seqs_rc_fp]
def tearDown(self):
"""
"""
remove_files(self._files_to_remove, error_on_missing=False)
def test_blast_seqs(self):
""" blast_seqs: functions as expected
"""
blast_db, db_files_to_remove = \
build_blast_db_from_fasta_path(self.reference_seqs_fp)
self._files_to_remove += db_files_to_remove
self.otu_picker.blast_db = blast_db
actual_clusters, actual_failures =\
self.otu_picker._blast_seqs(self.seqs)
for v in actual_clusters.values():
v.sort()
actual_failures.sort()
expected_clusters = {'ref1': ['s1', 's2', 's3'], 'ref2': ['s4'],
'ref3': ['s5']}
expected_failures = ['s0', 's6', 's7']
self.assertEqual(actual_clusters, expected_clusters)
self.assertEqual(actual_failures, expected_failures)
def test_update_cluster_map(self):
"""update_cluster_map: functions as expected
"""
# nothing in original cm
cm = {}
new_cm = {'c1': ['1', '2', '5'], 'c2': ['4', '3']}
expected = new_cm
actual = self.otu_picker._update_cluster_map(cm, new_cm)
self.assertEqual(actual, expected)
# no new clusters
cm = {'c1': ['1', '2', '5'], 'c2': ['4', '3']}
new_cm = {}
expected = cm
actual = self.otu_picker._update_cluster_map(cm, new_cm)
self.assertEqual(actual, expected)
# overlapping clusters
cm = {'c1': ['1', '2', '5'], 'c2': ['4', '3']}
new_cm = {'c1': ['8'], 'c2': ['10', '14'], '3': ['42']}
expected = {'c1': ['1', '2', '5', '8'], 'c2':
['4', '3', '10', '14'], '3': ['42']}
actual = self.otu_picker._update_cluster_map(cm, new_cm)
self.assertEqual(actual, expected)
# no duplicate seq_id checking
cm = {'c1': ['1']}
new_cm = cm
expected = {'c1': ['1', '1']}
actual = self.otu_picker._update_cluster_map(cm, new_cm)
self.assertEqual(actual, expected)
# no clusters at all
actual = self.otu_picker._update_cluster_map({}, {})
self.assertEqual(actual, {})
def test_get_blast_hits_blastn(self):
"""get_blast_hits functions as expected with blastn """
actual = get_blast_hits(
self.seqs,
self.blast_db,
max_e_value=1e-10,
min_pct_identity=0.5,
min_aligned_percent=0.5)
# couple of sanity checks against command line blast
self.assertEqual(len(actual['s3']), 2)
self.assertEqual(actual['s3'][0]['SUBJECT ID'], 'ref1')
self.assertEqual(actual['s3'][1]['SUBJECT ID'], 'ref5')
# increase stringency reduces number of blast hits
actual = get_blast_hits(
self.seqs,
self.blast_db,
max_e_value=1e-10,
min_pct_identity=0.5,
min_aligned_percent=0.8)
# couple of sanity checks against command line blast
self.assertEqual(len(actual['s3']), 1)
self.assertEqual(actual['s3'][0]['SUBJECT ID'], 'ref1')
def test_call(self):
"""BLAST OTU Picker functions as expected
"""
expected = {'ref1': ['s3', 's2', 's1'],
'ref2': ['s4'],
'ref3': ['s5']}
actual = self.otu_picker(self.seqs_fp,
refseqs_fp=self.reference_seqs_fp)
self.assertEqual(actual, expected)
def test_call_alt_min_aligned_length(self):
"""BLAST OTU picker handles alt min_aligned_percent values """
# first 12 bases match perfect, and no alignment from there
seqs = [('s1', 'TGCAGCTTGAGCGTTGTTACCGCTTT')]
ref_seqs = [
('r1', 'TGCAGCTTGAGCCACGCCGAATAGCCGAGTTTGACCGGGCCCAGGAGGAGAGAGAGAGCTTC')]
fd, seqs_fp = mkstemp(prefix='BlastOtuPickerTest_', suffix='.fasta')
close(fd)
fd, reference_seqs_fp = mkstemp(prefix='BlastOtuPickerTest_',
suffix='.fasta')
close(fd)
f = open(seqs_fp, 'w')
f.write('\n'.join(['>%s\n%s' % s for s in seqs]))
f.close()
f = open(reference_seqs_fp, 'w')
f.write('\n'.join(['>%s\n%s' % s for s in ref_seqs]))
f.close()
self._files_to_remove.append(seqs_fp)
self._files_to_remove.append(reference_seqs_fp)
# with low min_aligned_percent s1 matches r1
otu_picker = BlastOtuPicker({'max_e_value': 1e-3,
'min_aligned_percent': 0.10})
expected = {'r1': ['s1']}
actual = otu_picker(seqs_fp,
refseqs_fp=reference_seqs_fp)
self.assertEqual(actual, expected)
# with min_aligned_percent s1 doesn't match r1
otu_picker = BlastOtuPicker({'max_e_value': 1e-3,
'min_aligned_percent': 0.50})
expected = {}
actual = otu_picker(seqs_fp,
refseqs_fp=reference_seqs_fp)
self.assertEqual(actual, expected)
def test_call_rc(self):
"""BLAST OTU picker: RC seqs cluster to same OTU as forward orientation
"""
expected = {'ref1': ['s3', 's2', 's1'],
'ref2': ['s4'],
'ref3': ['s5']}
actual = self.otu_picker(self.seqs_fp,
refseqs_fp=self.reference_seqs_rc_fp)
self.assertEqual(actual, expected)
def test_call_alt_params(self):
"""BLAST OTU Picker functions as expected with alt params
"""
otu_picker = BlastOtuPicker({'max_e_value': 1e-30})
expected = {}
actual = otu_picker(self.seqs_fp,
refseqs_fp=self.reference_seqs_fp)
self.assertEqual(actual, expected)
self.otu_picker = BlastOtuPicker(
{'max_e_value': 1e-3, 'Similarity': 0.90})
expected_90 = {'ref1': ['s3', 's2', 's1'],
'ref2': ['s4'],
'ref3': ['s5'],
'ref4': ['s6']}
actual = self.otu_picker(self.seqs_fp,
refseqs_fp=self.reference_seqs_fp)
self.assertEqual(actual, expected_90)
def test_call_preexisting_blast_db(self):
"""BLAST OTU Picker functions w preexisting blast db
"""
blast_db, db_files_to_remove = \
build_blast_db_from_fasta_path(self.reference_seqs_fp)
self._files_to_remove += db_files_to_remove
expected = {'ref1': ['s3', 's2', 's1'],
'ref2': ['s4'],
'ref3': ['s5']}
actual = self.otu_picker(self.seqs_fp, blast_db=blast_db)
self.assertItemsEqual(actual, expected)
def test_call_multiple_blast_runs(self):
"""BLAST OTU Picker not affected by alt SeqsPerBlastRun
"""
expected = {'ref1': ['s1', 's2', 's3'],
'ref2': ['s4'],
'ref3': ['s5']}
for v in expected.values():
v.sort()
for SeqsPerBlastRun in [1, 2, 4, 6, 7, 8, 100]:
self.otu_picker.Params['seqs_per_blast_run'] \
= SeqsPerBlastRun
actual = self.otu_picker(self.seqs_fp,
refseqs_fp=self.reference_seqs_fp)
for v in actual.values():
v.sort()
self.assertEqual(actual, expected)
class PrefixSuffixOtuPickerTests(TestCase):
""" Tests of the prefix/suffix-based OTU picker """
def setUp(self):
"""
"""
self.otu_picker = PrefixSuffixOtuPicker({})
self.seqs = [
('s1 some description', 'ACGTAATGGT'),
('s2', 'ATTTAATGGT'),
('s3', 'ACGTAATTTT'),
('s4', 'AAATAAAAA'),
('s5', 'ACGTTGGT'),
('s6', 'ACGTATTTTAATTTGGCATGGT'),
]
fd, self.small_seq_path = mkstemp(prefix='PrefixSuffixOtuPickerTest_',
suffix='.fasta')
close(fd)
self._files_to_remove = [self.small_seq_path]
f = open(self.small_seq_path, 'w')
f.write('\n'.join(['>%s\n%s' % s for s in self.seqs]))
f.close()
def tearDown(self):
"""
"""
remove_files(self._files_to_remove)
def test_call(self):
"""Prefix/suffix OTU Picker functions as expected
"""
expected = {3: ['s1', 's5', 's6'],
1: ['s2'],
0: ['s3'],
2: ['s4']}
actual = self.otu_picker(self.small_seq_path,
prefix_length=4, suffix_length=4)
self.assertEqual(actual, expected)
def test_call_extra_long_lengths(self):
"""Prefix/suffix OTU Picker functions as expected
"""
seqs = [
('s1 some description',
'ACGTAATGGTCCCCCCCCCGGGGGGGGCCCCCCGGG'),
('s2', 'ATTTAATGGT'),
('s3', 'ACGTAATTTT'),
('s4', 'AAATAAAAA'),
('s5', 'ACGTTGGT'),
('s6', 'ACGTATTTTAATTTGGCATGGT'),
('s7', 'ACGTATTTTAATTTGGCATGG'),
('s1_dup',
'ACGTAATGGTCCCCCCCCCGGGGGGGGCCCCCCGGG'),
('s2_dup', 'ATTTAATGGT'),
]
fd, seq_path = mkstemp(prefix='PrefixSuffixOtuPickerTest_',
suffix='.fasta')
close(fd)
self._files_to_remove.append(seq_path)
f = open(seq_path, 'w')
f.write('\n'.join(['>%s\n%s' % s for s in seqs]))
f.close()
expected = {1: ['s1', 's1_dup'],
2: ['s2', 's2_dup'],
3: ['s3'],
4: ['s4'],
5: ['s5'],
6: ['s6'],
7: ['s7']}
# a long prefix collapses identical sequences
actual = self.otu_picker(seq_path,
prefix_length=400, suffix_length=0)
actual_clusters = actual.values()
expected_clusters = expected.values()
self.assertItemsEqual(actual_clusters, expected_clusters)
# a long suffix collapses identical sequences
actual = self.otu_picker(seq_path,
prefix_length=0, suffix_length=400)
actual_clusters = actual.values()
expected_clusters = expected.values()
self.assertItemsEqual(actual_clusters, expected_clusters)
# a long prefix and suffix together collapse identical sequences
actual = self.otu_picker(seq_path,
prefix_length=400, suffix_length=400)
actual_clusters = actual.values()
expected_clusters = expected.values()
self.assertItemsEqual(actual_clusters, expected_clusters)
def test_collapse_exact_matches_prefix_and_suffix(self):
"""Prefix/suffix: collapse_exact_matches fns with pref/suf len > 0
"""
expected = [['s1', 's5', 's6'], ['s2'], ['s3'], ['s4']]
actual = sorted(
self.otu_picker._collapse_exact_matches(self.seqs, 4, 4))
expected.sort()
self.assertEqual(actual, expected)
expected = [['s1', 's2', 's3', 's5', 's6'], ['s4']]
actual = self.otu_picker._collapse_exact_matches(self.seqs, 1, 1)
actual.sort()
expected.sort()
self.assertEqual(actual, expected)
def test_collapse_exact_matches_prefix_zero(self):
"""Prefix/suffix: collapse_exact_matches fns with prefix len = 0
"""
expected = [['s1', 's2', 's5', 's6'], ['s3'], ['s4']]
actual = sorted(
self.otu_picker._collapse_exact_matches(self.seqs, 0, 4))
expected.sort()
self.assertEqual(actual, expected)
expected = [['s1', 's2', 's3', 's5', 's6'], ['s4']]
actual = self.otu_picker._collapse_exact_matches(self.seqs, 0, 1)
actual.sort()
expected.sort()
self.assertEqual(actual, expected)
def test_collapse_exact_matches_suffix_zero(self):
"""Prefix/suffix: collapse_exact_matches fns with suffix len = 0
"""
expected = [['s1', 's3', 's5', 's6'], ['s2'], ['s4']]
actual = sorted(
self.otu_picker._collapse_exact_matches(self.seqs, 4, 0))
expected.sort()
self.assertEqual(actual, expected)
expected = [['s1', 's2', 's3', 's4', 's5', 's6']]
actual = self.otu_picker._collapse_exact_matches(self.seqs, 1, 0)
actual.sort()
expected.sort()
self.assertEqual(actual, expected)
def test_build_seq_hash(self):
""" """
self.assertEqual(self.otu_picker._build_seq_hash(
'ATGTACGT', 0, 0), '')
self.assertEqual(self.otu_picker._build_seq_hash(
'ATGTACGT', 2, 2), 'ATGT')
self.assertEqual(self.otu_picker._build_seq_hash(
'ATTACGT', 2, 1), 'ATT')
self.assertEqual(self.otu_picker._build_seq_hash(
'ATGTACGT', 1, 2), 'AGT')
self.assertEqual(self.otu_picker._build_seq_hash(
'ATGTACGT', 1, 1), 'AT')
self.assertEqual(self.otu_picker._build_seq_hash(
'ATGTACGT', 4, 3), 'ATGTCGT')
self.assertEqual(self.otu_picker._build_seq_hash(
'ATGTACGT', 3, 4), 'ATGACGT')
self.assertEqual(self.otu_picker._build_seq_hash(
'ATGTACGT', 4, 4), 'ATGTACGT')
self.assertEqual(self.otu_picker._build_seq_hash(
'ATGTACGT', 5, 3), 'ATGTACGT')
self.assertEqual(self.otu_picker._build_seq_hash(
'ATGTACGT', 8, 0), 'ATGTACGT')
self.assertEqual(self.otu_picker._build_seq_hash(
'ATGTACGT', 3, 5), 'ATGTACGT')
self.assertEqual(self.otu_picker._build_seq_hash(
'ATGTACGT', 0, 8), 'ATGTACGT')
self.assertEqual(self.otu_picker._build_seq_hash(
'ATGTACGT', 4, 5), 'ATGTACGT')
self.assertEqual(self.otu_picker._build_seq_hash(
'ATGTACGT', 5, 4), 'ATGTACGT')
self.assertEqual(self.otu_picker._build_seq_hash(
'ATGTACGT', 300, 0), 'ATGTACGT')
self.assertEqual(self.otu_picker._build_seq_hash(
'ATGTACGT', 0, 300), 'ATGTACGT')
class TrieOtuPickerTests(TestCase):
""" Tests of the Trie-based OTU picker """
def setUp(self):
"""
"""
self.otu_picker = TrieOtuPicker({})
self.otu_picker_rev = TrieOtuPicker({'Reverse': True})
seqs = [
('s1 some description', 'ACGTAATGGT'),
('s2', 'ACGTATTTTAATTTGGCATGGT'),
('s3', 'ACGTAAT'),
('s4', 'ACGTA'),
('s5', 'ATTTAATGGT'),
('s6', 'ATTTAAT'),
('s7', 'AAATAAAAA')
]
seqs_rev = [
('s1 some description', 'TGGTAATGCA'),
('s2', 'TGGTACGGTTTAATTTTATGCA'),
('s3', 'TAATGCA'),
('s4', 'ATGCA'),
('s5', 'TGGTAATTTA'),
('s6', 'TAATTTA'),
('s7', 'AAAAATAAA')
]
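# seqs_rev holds the character-reversed copies of seqs, so the picker built
# with {'Reverse': True} is expected to produce the same clusters from them
# as the forward picker does from seqs.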
fd, self.small_seq_path = mkstemp(prefix='TrieOtuPickerTest_',
suffix='.fasta')
close(fd)
self._files_to_remove = [self.small_seq_path]
f = open(self.small_seq_path, 'w')
f.write('\n'.join(['>%s\n%s' % s for s in seqs]))
f.close()
fd, self.small_seq_path_rev = mkstemp(prefix='TrieOtuPickerTest_',
suffix='.fasta')
close(fd)
self._files_to_remove.append(self.small_seq_path_rev)
f = open(self.small_seq_path_rev, 'w')
f.write('\n'.join(['>%s\n%s' % s for s in seqs_rev]))
f.close()
def tearDown(self):
"""
"""
remove_files(self._files_to_remove)
def test_call(self):
"""Trie OTU Picker functions as expected
"""
expected = {0: ['s2'],
1: ['s3', 's4', 's1'],
2: ['s7'],
3: ['s6', 's5']}
actual = self.otu_picker(self.small_seq_path)
self.assertEqual(actual, expected)
def test_call_reverse(self):
"""Trie OTU Picker functions as expected with the 'Reverse' option
"""
expected = {0: ['s2'],
1: ['s3', 's4', 's1'],
2: ['s7'],
3: ['s6', 's5']}
actual = self.otu_picker_rev(self.small_seq_path_rev)
self.assertEqual(actual, expected)
class Usearch610DeNovoOtuPickerTests(TestCase):
""" Tests for usearch 6.1 de novo functionality """
def setUp(self):
# create the temporary input files
self.output_dir = get_qiime_temp_dir()
self.dna_seqs_usearch_97perc_id = dna_seqs_usearch_97perc_id
self.dna_seqs_usearch_97perc_id_rc = dna_seqs_usearch_97perc_id_rc
self.dna_seqs_usearch_97perc_id_len_diff =\
dna_seqs_usearch_97perc_id_len_diff
self.dna_seqs_usearch_97perc_dups = dna_seqs_usearch_97perc_dups
fd, self.tmp_seq_filepath_97perc_id = mkstemp(
prefix='Usearch610DeNovoOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath_97perc_id, 'w')
seq_file.write(self.dna_seqs_usearch_97perc_id)
seq_file.close()
fd, self.tmp_seq_filepath_97perc_id_rc = mkstemp(
prefix='Usearch610DeNovoOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath_97perc_id_rc, 'w')
seq_file.write(self.dna_seqs_usearch_97perc_id_rc)
seq_file.close()
fd, self.tmp_seqs_usearch97perc_id_len_diff = mkstemp(
prefix="Usearch610DeNovoOtuPickerTest_",
suffix=".fasta")
close(fd)
seq_file = open(self.tmp_seqs_usearch97perc_id_len_diff, "w")
seq_file.write(self.dna_seqs_usearch_97perc_id_len_diff)
seq_file.close()
fd, self.tmp_seqs_usearch_97perc_dups = mkstemp(
prefix="Usearch610DeNovoOtuPickerTest_",
suffix=".fasta")
close(fd)
seq_file = open(self.tmp_seqs_usearch_97perc_dups, "w")
seq_file.write(self.dna_seqs_usearch_97perc_dups)
seq_file.close()
self._files_to_remove =\
[self.tmp_seq_filepath_97perc_id, self.tmp_seq_filepath_97perc_id_rc,
self.tmp_seqs_usearch97perc_id_len_diff,
self.tmp_seqs_usearch_97perc_dups]
self._dirs_to_remove = []
def tearDown(self):
remove_files(self._files_to_remove)
if self._dirs_to_remove:
for curr_dir in self._dirs_to_remove:
rmtree(curr_dir)
def test_call_default_params(self):
""" clusters seqs within 97% identity with default parameters """
app = Usearch610DeNovoOtuPicker(
params={'save_intermediate_files': False,
'output_dir': self.output_dir,
'remove_usearch_logs': True
})
obs_clusters = app(self.tmp_seq_filepath_97perc_id)
# All seqs should fall into a single cluster
expected_clusters = {'denovo0': ['usearch_ecoli_seq',
'usearch_ecoli_seq_2bp_change', 'usearch_ecoli_seq_1bp_change']}
for result in obs_clusters:
for cluster in obs_clusters[result]:
self.assertTrue(cluster in expected_clusters[result])
def test_call_default_params_and_lower_id(self):
""" clusters seqs within 95% identity with default parameters """
app = Usearch610DeNovoOtuPicker(
params={'save_intermediate_files': False,
'output_dir': self.output_dir,
'remove_usearch_logs': True,
'percent_id': 0.95
})
obs_clusters = app(self.tmp_seq_filepath_97perc_id)
# All seqs should fall into a single cluster
expected_clusters = {'denovo0': ['usearch_ecoli_seq',
'usearch_ecoli_seq_2bp_change', 'usearch_ecoli_seq_1bp_change']}
self.assertItemsEqual(obs_clusters, expected_clusters)
def test_call_default_params_and_higher_id(self):
""" clusters seqs within 99% identity with default parameters """
app = Usearch610DeNovoOtuPicker(
params={'save_intermediate_files': False,
'output_dir': self.output_dir,
'remove_usearch_logs': True,
'percent_id': 0.99
})
obs_clusters = app(self.tmp_seq_filepath_97perc_id)
# Seqs should fall into separate clusters
expected_clusters = {'denovo0': ['usearch_ecoli_seq'],
'denovo1': ['usearch_ecoli_seq_2bp_change'],
'denovo2': ['usearch_ecoli_seq_1bp_change']}
# should be exactly 3 clusters
self.assertEqual(len(obs_clusters), 3)
self.assertItemsEqual(obs_clusters.keys(), expected_clusters.keys())
self.assertItemsEqual(
obs_clusters.values(),
expected_clusters.values())
def test_call_default_params_reversed_seq(self):
""" Does not cluster reverse complemented sequence without --rev """
app = Usearch610DeNovoOtuPicker(
params={'save_intermediate_files': False,
'output_dir': self.output_dir,
'remove_usearch_logs': True
})
obs_clusters = app(self.tmp_seq_filepath_97perc_id_rc)
# RC seq should fall into its own cluster
expected_clusters = [set(['usearch_ecoli_seq',
'usearch_ecoli_seq_1bp_change']),
set(['usearch_ecoli_seq_2bp_change_rc'])]
self.assertEqual(len(obs_clusters), 2)
for result in obs_clusters:
self.assertTrue(set(obs_clusters[result]) in expected_clusters)
def test_call_default_params_reversed_seq_w_rev(self):
""" Does not cluster reverse complemented sequence without --rev """
app = Usearch610DeNovoOtuPicker(
params={'save_intermediate_files': False,
'output_dir': self.output_dir,
'remove_usearch_logs': True,
'rev': True
})
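# With rev=True usearch61 also considers reverse-complement matches, so the
# RC variant is expected to join the same cluster as the forward reads.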
obs_clusters =\
set(app(self.tmp_seq_filepath_97perc_id_rc).values()[0])
# All seqs should fall into a single cluster
expected_clusters = set(['usearch_ecoli_seq',
'usearch_ecoli_seq_2bp_change_rc', 'usearch_ecoli_seq_1bp_change'])
self.assertEqual(obs_clusters, expected_clusters)
def test_call_default_params_save_intermediate_files(self):
""" Preserves files if save_intermediate_files/logs is True """
intermediate_files_dir = self.output_dir + "/test_usearch61/"
create_dir(intermediate_files_dir)
self._dirs_to_remove.append(intermediate_files_dir)
app = Usearch610DeNovoOtuPicker(
params={'save_intermediate_files': True,
'output_dir':
intermediate_files_dir,
'remove_usearch_logs': False
})
obs_clusters = app(self.tmp_seq_filepath_97perc_id)
expected_intermediate_fps =\
[intermediate_files_dir + "denovo_abundance_sorted.fna",
intermediate_files_dir + "denovo_abundance_sorted.uc",
intermediate_files_dir + "denovo_smallmem_clustered.uc",
intermediate_files_dir + "abundance_sorted.log",
intermediate_files_dir + "smallmem_clustered.log"]
for curr_file in expected_intermediate_fps:
self.assertTrue(exists(curr_file))
def test_call_default_params_save_intermediate_files_fast_cluster(self):
""" Preserves files if save_intermediate_files/logs is True """
intermediate_files_dir = self.output_dir + "/test_usearch61_fast/"
create_dir(intermediate_files_dir)
self._dirs_to_remove.append(intermediate_files_dir)
app = Usearch610DeNovoOtuPicker(
params={'save_intermediate_files': True,
'output_dir':
intermediate_files_dir,
'remove_usearch_logs': False,
'usearch61_sort_method':
'length',
'usearch_fast_cluster': True
})
obs_clusters = app(self.tmp_seq_filepath_97perc_id)
expected_intermediate_fps =\
[intermediate_files_dir + "denovo_fast_clustered.uc",
intermediate_files_dir + "fast_clustered.log"]
for curr_file in expected_intermediate_fps:
self.assertTrue(exists(curr_file))
# All seqs should fall into a single cluster
expected_clusters = {'denovo0': ['usearch_ecoli_seq',
'usearch_ecoli_seq_1bp_change', 'usearch_ecoli_seq_2bp_change']}
self.assertItemsEqual(obs_clusters, expected_clusters)
def test_call_default_params_minlen(self):
""" Discards reads that fall below minlen setting """
app = Usearch610DeNovoOtuPicker(
params={'save_intermediate_files': False,
'output_dir': self.output_dir,
'remove_usearch_logs': True,
'minlen': 101
})
obs_clusters = app(self.tmp_seq_filepath_97perc_id)
# Should get no results
expected_clusters = {}
self.assertEqual(obs_clusters, expected_clusters)
def test_usearch61_params(self):
""" usearch61 handles changes to other parameters """
app = Usearch610DeNovoOtuPicker(
params={'save_intermediate_files': False,
'output_dir': self.output_dir,
'remove_usearch_logs': True,
'wordlength': 25,
'usearch61_maxrejects': 200,
'usearch61_maxaccepts': 5
})
obs_clusters = app(self.tmp_seq_filepath_97perc_id, otu_prefix="test")
# All seqs should fall into a single cluster
expected_clusters = {'test0': ['usearch_ecoli_seq',
'usearch_ecoli_seq_2bp_change', 'usearch_ecoli_seq_1bp_change']}
self.assertItemsEqual(obs_clusters, expected_clusters)
def test_usearch61_length_sorting(self):
""" Sorting according to length, clusters seqs """
app = Usearch610DeNovoOtuPicker(
params={'save_intermediate_files': False,
'output_dir': self.output_dir,
'remove_usearch_logs': True,
'usearch61_sort_method':
'length'
})
obs_clusters = app(self.tmp_seqs_usearch97perc_id_len_diff)
# All seqs should fall into a single cluster
expected_clusters = {'denovo0': ['usearch_ecoli_seq_2bp_change',
'usearch_ecoli_seq_1bp_change', 'usearch_ecoli_seq']}
self.assertEqual(obs_clusters, expected_clusters)
def test_usearch61_sizeorder(self):
""" Handles sizeorder option, clusters seqs """
app = Usearch610DeNovoOtuPicker(
params={'save_intermediate_files': False,
'output_dir': self.output_dir,
'remove_usearch_logs': True,
'sizeorder': True
})
obs_clusters = app(self.tmp_seqs_usearch_97perc_dups)
# All seqs should fall into a single cluster
expected_clusters = {'denovo0': ['usearch_ecoli_seq_1bp_change',
'usearch_ecoli_seq_2bp_change', 'usearch_ecoli_seq',
'usearch_ecoli_seq_1bp_change_dup1',
'usearch_ecoli_seq_1bp_change_dup2']}
self.assertEqual(obs_clusters, expected_clusters)
class Usearch61ReferenceOtuPickerTests(TestCase):
""" Tests for usearch 6.1 reference functionality """
def setUp(self):
# create the temporary input files
self.output_dir = get_qiime_temp_dir()
self.dna_seqs_usearch_97perc_id = dna_seqs_usearch_97perc_id
self.dna_seqs_usearch_97perc_id_rc = dna_seqs_usearch_97perc_id_rc
self.dna_seqs_usearch_97perc_id_len_diff =\
dna_seqs_usearch_97perc_id_len_diff
self.dna_seqs_usearch_97perc_dups = dna_seqs_usearch_97perc_dups
self.dna_seqs_rc_single_seq = dna_seqs_rc_single_seq
fd, self.tmp_seq_filepath_97perc_id = mkstemp(
prefix='Usearch610DeNovoOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath_97perc_id, 'w')
seq_file.write(self.dna_seqs_usearch_97perc_id)
seq_file.close()
fd, self.tmp_seq_filepath_97perc_id_rc = mkstemp(
prefix='Usearch610DeNovoOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath_97perc_id_rc, 'w')
seq_file.write(self.dna_seqs_usearch_97perc_id_rc)
seq_file.close()
fd, self.tmp_seqs_usearch97perc_id_len_diff = mkstemp(
prefix="Usearch610DeNovoOtuPickerTest_",
suffix=".fasta")
close(fd)
seq_file = open(self.tmp_seqs_usearch97perc_id_len_diff, "w")
seq_file.write(self.dna_seqs_usearch_97perc_id_len_diff)
seq_file.close()
fd, self.tmp_seqs_usearch_97perc_dups = mkstemp(
prefix="Usearch610DeNovoOtuPickerTest_",
suffix=".fasta")
close(fd)
seq_file = open(self.tmp_seqs_usearch_97perc_dups, "w")
seq_file.write(self.dna_seqs_usearch_97perc_dups)
seq_file.close()
fd, self.tmp_seqs_rc_single_seq = mkstemp(
prefix="Usearch610DeNovoOtuPickerTest_",
suffix=".fasta")
close(fd)
seq_file = open(self.tmp_seqs_rc_single_seq, "w")
seq_file.write(self.dna_seqs_rc_single_seq)
seq_file.close()
self._files_to_remove =\
[self.tmp_seq_filepath_97perc_id, self.tmp_seq_filepath_97perc_id_rc,
self.tmp_seqs_usearch97perc_id_len_diff,
self.tmp_seqs_usearch_97perc_dups, self.tmp_seqs_rc_single_seq]
self._dirs_to_remove = []
def tearDown(self):
remove_files(self._files_to_remove)
if self._dirs_to_remove:
for curr_dir in self._dirs_to_remove:
rmtree(curr_dir)
def test_call_default_params(self):
""" clusters seqs within 97% identity with default parameters """
app = Usearch61ReferenceOtuPicker(
params={'save_intermediate_files': False,
'output_dir':
self.output_dir,
'remove_usearch_logs': True
})
obs_clusters, failures = app(self.tmp_seq_filepath_97perc_id,
refseqs_fp=self.tmp_seq_filepath_97perc_id_rc)
# Randomly selected match is used for equivalent matches, so need to
# test for results without order affecting output
expected_clusters =\
{'usearch_ecoli_seq': ['usearch_ecoli_seq'],
'usearch_ecoli_seq_1bp_change': ['usearch_ecoli_seq_1bp_change',
'usearch_ecoli_seq_2bp_change']}
for result in obs_clusters:
for cluster in obs_clusters[result]:
self.assertTrue(cluster in expected_clusters[result])
expected_failures = []
self.assertEqual(failures, expected_failures)
def test_call_default_params_and_lower_id(self):
""" clusters seqs within 95% identity with default parameters """
app = Usearch61ReferenceOtuPicker(
params={'save_intermediate_files': False,
'output_dir':
self.output_dir,
'remove_usearch_logs': True,
'percent_id': 0.95
})
obs_clusters, failures = app(self.tmp_seq_filepath_97perc_id,
refseqs_fp=self.tmp_seq_filepath_97perc_id_rc)
expected_clusters = {'usearch_ecoli_seq': ['usearch_ecoli_seq'],
'usearch_ecoli_seq_1bp_change': ['usearch_ecoli_seq_1bp_change',
'usearch_ecoli_seq_2bp_change']}
for result in obs_clusters:
for cluster in obs_clusters[result]:
self.assertTrue(cluster in expected_clusters[result])
expected_failures = []
self.assertEqual(failures, expected_failures)
def test_call_default_params_and_higher_id(self):
""" clusters seqs within 99% identity with default parameters """
app = Usearch61ReferenceOtuPicker(
params={'save_intermediate_files': False,
'output_dir':
self.output_dir,
'remove_usearch_logs': True,
'percent_id': 0.99
})
obs_clusters, failures = app(self.tmp_seq_filepath_97perc_id,
refseqs_fp=self.tmp_seq_filepath_97perc_id_rc)
# Seqs should fall into separate clusters
expected_clusters = {'denovo0': ['usearch_ecoli_seq_2bp_change'],
'usearch_ecoli_seq': ['usearch_ecoli_seq'],
'usearch_ecoli_seq_1bp_change': ['usearch_ecoli_seq_1bp_change']}
self.assertEqual(obs_clusters, expected_clusters)
expected_failures = []
self.assertEqual(failures, expected_failures)
def test_call_default_params_reversed_seq(self):
""" Does not cluster reverse complemented sequence without --rev """
app = Usearch61ReferenceOtuPicker(
params={'save_intermediate_files': False,
'output_dir':
self.output_dir,
'remove_usearch_logs': True,
'suppress_new_clusters':
True,
'rev': False
})
obs_clusters, failures = app(self.tmp_seq_filepath_97perc_id,
refseqs_fp=self.tmp_seqs_rc_single_seq)
# The query seqs and the reference are in opposite orientations and
# rev is False, so all seqs should fail.
expected_clusters = {}
self.assertEqual(obs_clusters, expected_clusters)
expected_failures = ['usearch_ecoli_seq',
'usearch_ecoli_seq_2bp_change',
'usearch_ecoli_seq_1bp_change']
self.assertEqual(len(failures), 3)
for curr_failure in failures:
self.assertTrue(curr_failure in expected_failures)
def test_call_default_params_reversed_seq_w_rev(self):
""" Does not cluster reverse complemented sequence without --rev """
app = Usearch61ReferenceOtuPicker(
params={'save_intermediate_files': False,
'output_dir':
self.output_dir,
'remove_usearch_logs': True,
'rev': True,
'suppress_new_clusters': True
})
obs_clusters, failures = app(self.tmp_seq_filepath_97perc_id,
refseqs_fp=self.tmp_seq_filepath_97perc_id_rc)
# All seqs should fall into a single cluster
expected_clusters =\
{'usearch_ecoli_seq_2bp_change_rc': ['usearch_ecoli_seq_2bp_change'],
'usearch_ecoli_seq': ['usearch_ecoli_seq'],
'usearch_ecoli_seq_1bp_change': ['usearch_ecoli_seq_1bp_change']}
self.assertEqual(obs_clusters, expected_clusters)
expected_failures = []
self.assertEqual(failures, expected_failures)
def test_call_default_params_save_intermediate_files(self):
""" Preserves files if save_intermediate_files/logs is True """
intermediate_files_dir = self.output_dir + "/test_usearch61/"
create_dir(intermediate_files_dir)
self._dirs_to_remove.append(intermediate_files_dir)
app = Usearch61ReferenceOtuPicker(
params={'save_intermediate_files': True,
'output_dir':
intermediate_files_dir,
'remove_usearch_logs': False
})
obs_clusters, failures = app(self.tmp_seq_filepath_97perc_id,
refseqs_fp=self.tmp_seq_filepath_97perc_id_rc)
expected_intermediate_fps =\
[join(intermediate_files_dir, "abundance_sorted.fna"),
join(intermediate_files_dir, "abundance_sorted.log"),
join(intermediate_files_dir, "abundance_sorted.uc"),
join(intermediate_files_dir, "ref_clustered.log"),
join(intermediate_files_dir, "ref_clustered.uc")]
for curr_file in expected_intermediate_fps:
self.assertTrue(exists(curr_file))
expected_failures = []
self.assertEqual(failures, expected_failures)
def test_call_default_params_save_intermediate_files_fast_cluster(self):
""" Preserves files if save_intermediate_files/logs is True """
intermediate_files_dir = self.output_dir + "/test_usearch61_fast_1160/"
create_dir(intermediate_files_dir)
self._dirs_to_remove.append(intermediate_files_dir)
app = Usearch61ReferenceOtuPicker(
params={'save_intermediate_files': True,
'output_dir':
intermediate_files_dir,
'remove_usearch_logs': False,
'usearch61_sort_method':
'length',
'usearch_fast_cluster': True
})
obs_clusters, failures = app(self.tmp_seq_filepath_97perc_id,
refseqs_fp=self.tmp_seq_filepath_97perc_id_rc)
expected_intermediate_fps =\
[join(intermediate_files_dir, "abundance_sorted.fna"),
join(intermediate_files_dir, "abundance_sorted.log"),
join(intermediate_files_dir, "abundance_sorted.uc"),
join(intermediate_files_dir, "ref_clustered.log"),
join(intermediate_files_dir, "ref_clustered.uc")]
for curr_file in expected_intermediate_fps:
self.assertTrue(exists(curr_file))
expected_clusters = {'usearch_ecoli_seq': ['usearch_ecoli_seq'],
'usearch_ecoli_seq_1bp_change': ['usearch_ecoli_seq_2bp_change',
'usearch_ecoli_seq_1bp_change']}
for result in obs_clusters:
for cluster in obs_clusters[result]:
self.assertTrue(cluster in expected_clusters[result])
expected_failures = []
self.assertEqual(failures, expected_failures)
# Don't have a good way to catch this error currently
'''def test_call_default_params_minlen(self):
""" Discards reads that fall below minlen setting """
app = Usearch61ReferenceOtuPicker(params={'save_intermediate_files':False,
'output_dir':self.output_dir,
'remove_usearch_logs':True,
'minlen':101
})
obs_clusters, failures = app(self.tmp_seq_filepath_97perc_id,
refseqs_fp = self.tmp_seq_filepath_97perc_id_rc)
# Should get no results
expected_clusters = {}
self.assertEqual(obs_clusters, expected_clusters)
expected_failures = []
self.assertEqual(failures, expected_failures)'''
def test_usearch61_params(self):
""" usearch61 handles changes to other parameters """
app = Usearch61ReferenceOtuPicker(
params={'save_intermediate_files': False,
'output_dir':
self.output_dir,
'remove_usearch_logs': True,
'wordlength': 25,
'usearch61_maxrejects': 200,
'usearch61_maxaccepts': 5
})
obs_clusters, failures = app(self.tmp_seq_filepath_97perc_id,
otu_prefix="test", refseqs_fp=self.tmp_seq_filepath_97perc_id_rc)
# won't get 2bp_change as reference, due to RC status
expected_clusters = {'usearch_ecoli_seq': ['usearch_ecoli_seq'],
'usearch_ecoli_seq_1bp_change': ['usearch_ecoli_seq_1bp_change',
'usearch_ecoli_seq_2bp_change']}
for result in obs_clusters:
for cluster in obs_clusters[result]:
self.assertTrue(cluster in expected_clusters[result])
expected_failures = []
self.assertEqual(failures, expected_failures)
def test_usearch61_length_sorting(self):
""" Sorting according to length, clusters seqs """
app = Usearch61ReferenceOtuPicker(
params={'save_intermediate_files': False,
'output_dir':
self.output_dir,
'remove_usearch_logs': True,
'usearch61_sort_method':
'length'
})
obs_clusters, failures = app(self.tmp_seqs_usearch97perc_id_len_diff,
refseqs_fp=self.tmp_seq_filepath_97perc_id_rc)
expected_clusters = {'usearch_ecoli_seq': ['usearch_ecoli_seq'],
'usearch_ecoli_seq_1bp_change': ['usearch_ecoli_seq_1bp_change',
'usearch_ecoli_seq_2bp_change']}
for result in obs_clusters:
for cluster in obs_clusters[result]:
self.assertTrue(cluster in expected_clusters[result])
expected_failures = []
self.assertEqual(failures, expected_failures)
def test_usearch61_sizeorder(self):
""" Handles sizeorder option, clusters seqs """
app = Usearch61ReferenceOtuPicker(
params={'save_intermediate_files': False,
'output_dir':
self.output_dir,
'remove_usearch_logs': True,
'sizeorder': True
})
obs_clusters, failures = app(self.tmp_seqs_usearch_97perc_dups,
refseqs_fp=self.tmp_seq_filepath_97perc_id_rc)
# Should have ecoli match ecoli, and remaining seqs match 1bp change.
expected_clusters = {'usearch_ecoli_seq': ['usearch_ecoli_seq'],
'usearch_ecoli_seq_1bp_change': ['usearch_ecoli_seq_1bp_change',
'usearch_ecoli_seq_2bp_change', 'usearch_ecoli_seq_1bp_change_dup1',
'usearch_ecoli_seq_1bp_change_dup2']}
for result in obs_clusters:
for cluster in obs_clusters[result]:
self.assertTrue(cluster in expected_clusters[result])
expected_failures = []
self.assertEqual(failures, expected_failures)
def test_closed_reference_usearch61(self):
""" usearch61 does closed reference OTU picking successfully """
app = Usearch61ReferenceOtuPicker(
params={'save_intermediate_files': False,
'output_dir':
self.output_dir,
'remove_usearch_logs': True,
'suppress_new_clusters': True
})
obs_clusters, failures = app(self.tmp_seq_filepath_97perc_id,
refseqs_fp=self.tmp_seqs_rc_single_seq)
# Randomly selected match is used for equivalent matches, so need to
# test for results without order affecting output
expected_clusters = {}
self.assertEqual(obs_clusters, expected_clusters)
expected_failures = ['usearch_ecoli_seq',
'usearch_ecoli_seq_2bp_change', 'usearch_ecoli_seq_1bp_change']
self.assertItemsEqual(failures, expected_failures)
def test_closed_reference_with_match_usearch61(self):
""" usearch61 does closed reference OTU picking successfully """
app = Usearch61ReferenceOtuPicker(
params={'save_intermediate_files': False,
'output_dir':
self.output_dir,
'remove_usearch_logs': True,
'suppress_new_clusters': True
})
obs_clusters, failures = app(self.tmp_seq_filepath_97perc_id_rc,
refseqs_fp=self.tmp_seqs_rc_single_seq)
# Randomly selected match is used for equivalent matches, so need to
# test for results without order affecting output
expected_clusters = {'usearch_ecoli_seq_2bp_change_rc':
['usearch_ecoli_seq_2bp_change_rc']}
self.assertEqual(obs_clusters, expected_clusters)
expected_failures = ['usearch_ecoli_seq',
'usearch_ecoli_seq_1bp_change']
self.assertEqual(set(failures), set(expected_failures))
def test_call_open_reference_usearch61(self):
""" usearch61 does open reference OTU picking successfully """
app = Usearch61ReferenceOtuPicker(
params={'save_intermediate_files': False,
'output_dir':
self.output_dir,
'remove_usearch_logs': True,
'suppress_new_clusters':
False
})
obs_clusters, failures = app(self.tmp_seq_filepath_97perc_id,
refseqs_fp=self.tmp_seqs_rc_single_seq)
# Randomly selected match is used for equivalent matches, so need to
# test for results without order affecting output
expected_clusters = {'denovo0': ['usearch_ecoli_seq',
'usearch_ecoli_seq_2bp_change',
'usearch_ecoli_seq_1bp_change']}
for result in obs_clusters:
for cluster in obs_clusters[result]:
self.assertTrue(cluster in expected_clusters[result])
expected_failures = []
self.assertEqual(failures, expected_failures)
def test_call_open_reference_with_match_usearch61(self):
""" usearch61 does open reference OTU picking successfully """
app = Usearch61ReferenceOtuPicker(
params={'save_intermediate_files': False,
'output_dir':
self.output_dir,
'remove_usearch_logs': True,
'suppress_new_clusters':
False
})
obs_clusters, failures = app(self.tmp_seq_filepath_97perc_id_rc,
refseqs_fp=self.tmp_seqs_rc_single_seq)
# Randomly selected match is used for equivalent matches, so need to
# test for results without order affecting output
expected_clusters = {'denovo0': ['usearch_ecoli_seq',
'usearch_ecoli_seq_1bp_change'],
'usearch_ecoli_seq_2bp_change_rc':
['usearch_ecoli_seq_2bp_change_rc']}
for result in obs_clusters:
for cluster in obs_clusters[result]:
self.assertTrue(cluster in expected_clusters[result])
expected_failures = []
self.assertEqual(failures, expected_failures)
class UsearchOtuPickerTests(TestCase):
""" Tests of the usearch-based OTU picker """
def setUp(self):
# create the temporary input files
self.dna_seqs_3 = dna_seqs_3
self.dna_seqs_3_derep = dna_seqs_3_derep
self.dna_seqs_4 = dna_seqs_usearch
self.ref_database = usearch_ref_seqs1
self.temp_dir = get_qiime_temp_dir()
fd, self.tmp_seq_filepath1 = mkstemp(
prefix='UsearchOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath1, 'w')
seq_file.write(self.dna_seqs_3)
seq_file.close()
fd, self.tmp_seq_filepath1_derep = mkstemp(
prefix='UsearchOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath1_derep, 'w')
seq_file.write(self.dna_seqs_3_derep)
seq_file.close()
fd, self.tmp_seq_filepath2 = mkstemp(
prefix='UsearchOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath2, 'w')
seq_file.write(self.dna_seqs_4)
seq_file.close()
fd, self.tmp_ref_database = mkstemp(
prefix='UsearchRefDatabase_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_ref_database, 'w')
seq_file.write(self.ref_database)
seq_file.close()
self._files_to_remove =\
[self.tmp_seq_filepath1, self.tmp_seq_filepath1_derep,
self.tmp_seq_filepath2, self.tmp_ref_database]
self._dirs_to_remove = []
def tearDown(self):
remove_files(self._files_to_remove)
if self._dirs_to_remove:
for curr_dir in self._dirs_to_remove:
rmtree(curr_dir)
def seqs_to_temp_fasta(self, seqs):
""" """
fd, fp = mkstemp(
prefix='UsearchOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(fp, 'w')
self._files_to_remove.append(fp)
for s in seqs:
seq_file.write('>%s\n%s\n' % s)
seq_file.close()
return fp
def test_call_default_params(self):
"""UsearchOtuPicker.__call__ returns expected clusters default params"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
# All seqs should create own cluster
exp_otu_ids = [str(x) for x in range(10)]
exp_clusters = [['uclust_test_seqs_0'],
['uclust_test_seqs_1'],
['uclust_test_seqs_2'],
['uclust_test_seqs_3'],
['uclust_test_seqs_4'],
['uclust_test_seqs_5'],
['uclust_test_seqs_6'],
['uclust_test_seqs_7'],
['uclust_test_seqs_8'],
['uclust_test_seqs_9']]
app = UsearchOtuPicker(params={'save_intermediate_files': False,
'db_filepath': self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection': True,
'minlen': 12,
'w': 12,
'minsize': 1
})
obs = app(self.tmp_seq_filepath1)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# just checks that we have the expected group of each
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_derep(self):
"""UsearchOtuPicker.__call__ returns expected clusters when using
--derep_fullseq"""
# adapted from test_call_default_params
# Sequences 1 and 9 have exact replicates
exp_otu_ids = [str(x) for x in range(10)]
exp_clusters = [['uclust_test_seqs_0'],
['uclust_test_seqs_1',
'uclust_test_seqs_1rep',
'uclust_test_seqs_1rep2'],
['uclust_test_seqs_2'],
['uclust_test_seqs_3'],
['uclust_test_seqs_4'],
['uclust_test_seqs_5'],
['uclust_test_seqs_6'],
['uclust_test_seqs_7'],
['uclust_test_seqs_8'],
['uclust_test_seqs_9',
'uclust_test_seqs_9rep']]
app = UsearchOtuPicker(params={'save_intermediate_files': False,
'db_filepath': self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'minlen': 12,
'w': 12,
'minsize': 1,
'derep_fullseq': True
})
obs = app(self.tmp_seq_filepath1_derep)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# just checks that we have the expected group of each
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_default_no_reference(self):
"""UsearchOtuPicker.__call__ returns expected clusters no referencedb"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
# All seqs should create own cluster
exp_otu_ids = [str(x) for x in range(10)]
exp_clusters = [['uclust_test_seqs_0'],
['uclust_test_seqs_1'],
['uclust_test_seqs_2'],
['uclust_test_seqs_3'],
['uclust_test_seqs_4'],
['uclust_test_seqs_5'],
['uclust_test_seqs_6'],
['uclust_test_seqs_7'],
['uclust_test_seqs_8'],
['uclust_test_seqs_9']]
app = UsearchOtuPicker(params={'save_intermediate_files': False,
'db_filepath': self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection': False,
'minlen': 12,
'w': 12,
'minsize': 1
})
obs = app(self.tmp_seq_filepath1)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# just checks that we have the expected group of each
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_low_cluster_identity(self):
"""UsearchOtuPicker.__call__ returns expected clusters"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
# Should only get 6 clusters
exp_otu_ids = [str(x) for x in range(6)]
exp_clusters =\
[['uclust_test_seqs_0', 'uclust_test_seqs_6',
'uclust_test_seqs_9'], ['uclust_test_seqs_1'], ['uclust_test_seqs_2'],
['uclust_test_seqs_3',
'uclust_test_seqs_5',
'uclust_test_seqs_8'],
['uclust_test_seqs_4'], ['uclust_test_seqs_7']]
app = UsearchOtuPicker(params={'save_intermediate_files': False,
'db_filepath': self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection': False,
'de_novo_chimera_detection': False,
'cluster_size_filtering': False,
'minlen': 12,
'w': 12,
'minsize': 1,
'percent_id': 0.80,
'percent_id_err': 0.97
})
obs = app(self.tmp_seq_filepath1)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# just checks that we have the expected group of each
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_detects_de_novo_chimeras(self):
"""UsearchOtuPicker.__call__ returns expected clusters"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
# Should detect and remove de novo chimeras from the clusters
exp_otu_ids = ['2', '3', '4', '5']
exp_clusters = [['uclust_test_seqs_1'],
['uclust_test_seqs_2'],
['uclust_test_seqs_4'],
['uclust_test_seqs_7']
]
app = UsearchOtuPicker(params={'save_intermediate_files': False,
'db_filepath': self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection': False,
'de_novo_chimera_detection': True,
'cluster_size_filtering': False,
'minlen': 12,
'w': 12,
'minsize': 1,
'percent_id': 0.97,
'percent_id_err': 0.80,
'abundance_skew': 2
})
obs = app(self.tmp_seq_filepath1)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# just checks that we have the expected group of each
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_detects_reference_chimeras(self):
"""UsearchOtuPicker.__call__ returns expected clusters"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
# Should detect and remove chimeric sequences
# during reference-based detection
exp_otu_ids = ['0', '1']
exp_clusters = [['Solemya', 'Solemya_seq2'],
['usearch_ecoli_seq', 'usearch_ecoli_seq2']
]
app = UsearchOtuPicker(params={'save_intermediate_files': False,
'db_filepath': self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection': True,
'de_novo_chimera_detection': False,
'cluster_size_filtering': False,
'minlen': 12,
'w': 12,
'minsize': 1,
'percent_id': 0.97,
'percent_id_err': 0.97,
'abundance_skew': 2
})
obs = app(self.tmp_seq_filepath2)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# just checks that we have the expected group of each
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_usearch_handles_intersections(self):
"""UsearchOtuPicker.__call__ returns expected clusters"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
# Should detect and remove chimeric sequences
# during reference-based detection
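# 'intersection' appears to keep only sequences retained by both the de novo
# and reference chimera checks, so the chimeric read is dropped here, whereas
# the 'union' test below retains it.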
exp_otu_ids = ['0', '1']
exp_clusters = [['Solemya', 'Solemya_seq2'],
['usearch_ecoli_seq', 'usearch_ecoli_seq2']
]
app = UsearchOtuPicker(params={'save_intermediate_files': False,
'db_filepath': self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection': True,
'de_novo_chimera_detection': True,
'cluster_size_filtering': False,
'minlen': 12,
'w': 12,
'minsize': 1,
'percent_id': 0.97,
'percent_id_err': 0.97,
'abundance_skew': 2,
'chimeras_retention': 'intersection'
})
obs = app(self.tmp_seq_filepath2)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# just checks that we have the expected group of each
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_usearch_handles_unions(self):
"""UsearchOtuPicker.__call__ returns expected clusters"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
# Should detect and remove chimeric sequences
# during reference-based detection
exp_otu_ids = ['0', '1', '2']
# will retain 'chimera' with union option.
exp_clusters = [['Solemya', 'Solemya_seq2'], ['chimera'],
['usearch_ecoli_seq', 'usearch_ecoli_seq2']
]
app = UsearchOtuPicker(params={'save_intermediate_files': False,
'db_filepath': self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection': True,
'de_novo_chimera_detection': True,
'cluster_size_filtering': False,
'minlen': 12,
'w': 12,
'minsize': 1,
'percent_id': 0.97,
'percent_id_err': 0.97,
'abundance_skew': 2,
'chimeras_retention': 'union'
})
obs = app(self.tmp_seq_filepath2)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# just checks that we have the expected group of each
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_writes_output(self):
"""UsearchOtuPicker.__call__ writes expected output clusters file"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
# Should detect and remove chimeric sequences during reference-based
# detection, then write the OTU mapping file in QIIME format.
fd, self.tmp_result_path = mkstemp(
prefix='UsearchOTUMapping_',
suffix='.txt')
close(fd)
f = open(self.tmp_result_path, "w")
fd, self.tmp_failures_path = mkstemp(
prefix='UsearchFailures_',
suffix='.txt')
close(fd)
f = open(self.tmp_failures_path, "w")
self._files_to_remove.append(self.tmp_result_path)
self._files_to_remove.append(self.tmp_failures_path)
app = UsearchOtuPicker(params={'save_intermediate_files': False,
'db_filepath': self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection': True,
'de_novo_chimera_detection': False,
'cluster_size_filtering': False,
'minlen': 12,
'w': 12,
'minsize': 1,
'percent_id': 0.97,
'percent_id_err': 0.97,
'abundance_skew': 2
})
obs = app(self.tmp_seq_filepath2, result_path=self.tmp_result_path,
failure_path=self.tmp_failures_path)
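# Each line of the OTU map is a tab-separated OTU id followed by the ids of
# its member sequences.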
expected_otu_mapping =\
["1\tSolemya\tSolemya_seq2\n",
"0\tusearch_ecoli_seq\tusearch_ecoli_seq2\n"""
]
f = open(self.tmp_result_path, "U")
actual_otu_mapping = f.readlines()
self.assertEqual(actual_otu_mapping, expected_otu_mapping)
expected_failures = ["chimera"]
f = open(self.tmp_failures_path, "U")
actual_failures = f.readlines()
self.assertEqual(actual_failures, expected_failures)
class UsearchReferenceOtuPickerTests(TestCase):
""" Tests of the usearch-based OTU picker """
def setUp(self):
# create the temporary input files
self.dna_seqs_3 = dna_seqs_3
self.dna_seqs_3_derep = dna_seqs_3_derep
self.dna_seqs_4 = dna_seqs_usearch
self.ref_database = usearch_ref_seqs1
self.otu_ref_database = uclustref_query_seqs1
self.temp_dir = get_qiime_temp_dir()
fd, self.tmp_seq_filepath1 = mkstemp(
prefix='UsearchOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath1, 'w')
seq_file.write(self.dna_seqs_3)
seq_file.close()
fd, self.tmp_seq_filepath1_derep = mkstemp(
prefix='UsearchOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath1_derep, 'w')
seq_file.write(self.dna_seqs_3_derep)
seq_file.close()
fd, self.tmp_seq_filepath2 = mkstemp(
prefix='UsearchOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath2, 'w')
seq_file.write(self.dna_seqs_4)
seq_file.close()
fd, self.tmp_ref_database = mkstemp(
prefix='UsearchRefDatabase_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_ref_database, 'w')
seq_file.write(self.ref_database)
seq_file.close()
fd, self.tmp_otu_ref_database = mkstemp(
prefix='UsearchRefOtuDatabase_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_otu_ref_database, 'w')
seq_file.write(self.otu_ref_database)
seq_file.close()
self._files_to_remove =\
[self.tmp_seq_filepath1, self.tmp_seq_filepath2,
self.tmp_seq_filepath1_derep, self.tmp_ref_database,
self.tmp_otu_ref_database]
self._dirs_to_remove = []
def tearDown(self):
remove_files(self._files_to_remove)
if self._dirs_to_remove:
for curr_dir in self._dirs_to_remove:
rmtree(curr_dir)
def seqs_to_temp_fasta(self, seqs):
""" """
fd, fp = mkstemp(
prefix='UsearchOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(fp, 'w')
self._files_to_remove.append(fp)
for s in seqs:
seq_file.write('>%s\n%s\n' % s)
seq_file.close()
return fp
def test_call_default_params(self):
"""UsearchOtuPicker.__call__ returns expected clusters default params"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
# All seqs should create own cluster
exp_otu_ids = [str(x) for x in range(10)]
exp_clusters = [['uclust_test_seqs_0'],
['uclust_test_seqs_1'],
['uclust_test_seqs_2'],
['uclust_test_seqs_3'],
['uclust_test_seqs_4'],
['uclust_test_seqs_5'],
['uclust_test_seqs_6'],
['uclust_test_seqs_7'],
['uclust_test_seqs_8'],
['uclust_test_seqs_9']]
app = UsearchReferenceOtuPicker(
params={'save_intermediate_files': False,
'db_filepath':
self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection':
True,
'minlen': 12,
'w': 12,
'minsize': 1
})
obs = app(self.tmp_seq_filepath1, self.tmp_otu_ref_database)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# just checks that we have the expected group of each
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_derep(self):
"""UsearchOtuPicker.__call__ returns expected clusters when using
--derep_fullseq"""
# adapted from test_call_default_params
# Sequences 1 and 9 have exact replicates
exp_otu_ids = [str(x) for x in range(10)]
exp_clusters = [['uclust_test_seqs_0'],
['uclust_test_seqs_1',
'uclust_test_seqs_1rep',
'uclust_test_seqs_1rep2'],
['uclust_test_seqs_2'],
['uclust_test_seqs_3'],
['uclust_test_seqs_4'],
['uclust_test_seqs_5'],
['uclust_test_seqs_6'],
['uclust_test_seqs_7'],
['uclust_test_seqs_8'],
['uclust_test_seqs_9',
'uclust_test_seqs_9rep']]
app = UsearchReferenceOtuPicker(
params={'save_intermediate_files': False,
'db_filepath':
self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection':
True,
'minlen': 12,
'w': 12,
'minsize': 1,
'derep_fullseq': True
})
obs = app(self.tmp_seq_filepath1_derep, self.tmp_otu_ref_database)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# just checks that we have the expected group of each
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_default_no_reference(self):
"""UsearchOtuPicker.__call__ returns expected clusters no referencedb"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
# All seqs should create own cluster
exp_otu_ids = [str(x) for x in range(10)]
exp_clusters = [['uclust_test_seqs_0'],
['uclust_test_seqs_1'],
['uclust_test_seqs_2'],
['uclust_test_seqs_3'],
['uclust_test_seqs_4'],
['uclust_test_seqs_5'],
['uclust_test_seqs_6'],
['uclust_test_seqs_7'],
['uclust_test_seqs_8'],
['uclust_test_seqs_9']]
app = UsearchReferenceOtuPicker(
params={'save_intermediate_files': False,
'db_filepath':
self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection':
False,
'minlen': 12,
'w': 12,
'minsize': 1
})
obs = app(self.tmp_seq_filepath1, self.tmp_otu_ref_database)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# just checks that we have the expected group of each
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_low_cluster_identity(self):
"""UsearchOtuPicker.__call__ returns expected clusters"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
# Expect each seq in its own cluster (ten singletons) here
exp_otu_ids = [str(x) for x in range(10)]
exp_clusters = [['uclust_test_seqs_0'],
['uclust_test_seqs_1'],
['uclust_test_seqs_2'],
['uclust_test_seqs_3'],
['uclust_test_seqs_4'],
['uclust_test_seqs_5'],
['uclust_test_seqs_6'],
['uclust_test_seqs_7'],
['uclust_test_seqs_8'],
['uclust_test_seqs_9']]
app = UsearchReferenceOtuPicker(
params={'save_intermediate_files': False,
'db_filepath':
self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection':
False,
'de_novo_chimera_detection':
False,
'cluster_size_filtering':
False,
'minlen': 12,
'w': 12,
'minsize': 1,
'percent_id': 0.80,
'percent_id_err': 0.97
})
obs = app(self.tmp_seq_filepath1, self.tmp_otu_ref_database)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# just checks that we have the expected group of each
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_detects_de_novo_chimeras(self):
"""UsearchOtuPicker.__call__ returns expected clusters"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
# Should detect and remove de novo chimeras from the clusters
exp_otu_ids = ['0', '1', '2', '3']
exp_clusters = [['uclust_test_seqs_1'],
['uclust_test_seqs_2'],
['uclust_test_seqs_4'],
['uclust_test_seqs_7']
]
app = UsearchReferenceOtuPicker(
params={'save_intermediate_files': False,
'db_filepath':
self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection':
False,
'de_novo_chimera_detection':
True,
'cluster_size_filtering':
False,
'minlen': 12,
'w': 12,
'minsize': 1,
'percent_id': 0.97,
'percent_id_err': 0.80,
'abundance_skew': 2
})
obs = app(self.tmp_seq_filepath1, self.tmp_otu_ref_database)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# just checks that we have the expected group of each
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_detects_reference_chimeras(self):
"""UsearchOtuPicker.__call__ returns expected clusters"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
# Should detect and remove chimeric sequences
# during reference-based detection
exp_otu_ids = ['0', '1']
exp_clusters = [['Solemya', 'Solemya_seq2'],
['usearch_ecoli_seq', 'usearch_ecoli_seq2']
]
app = UsearchReferenceOtuPicker(
params={'save_intermediate_files': False,
'db_filepath':
self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection':
True,
'de_novo_chimera_detection':
False,
'cluster_size_filtering':
False,
'minlen': 12,
'w': 12,
'minsize': 1,
'percent_id': 0.97,
'percent_id_err': 0.97,
'abundance_skew': 2
})
obs = app(self.tmp_seq_filepath2, self.tmp_otu_ref_database)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# just checks that we have the expected group of each
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_usearch_handles_intersections(self):
"""UsearchOtuPicker.__call__ returns expected clusters"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
# Should detect and remove chimeric sequence based
# during ref based detection
exp_otu_ids = ['0', '1']
exp_clusters = [['Solemya', 'Solemya_seq2'],
['usearch_ecoli_seq', 'usearch_ecoli_seq2']
]
app = UsearchReferenceOtuPicker(
params={'save_intermediate_files': False,
'db_filepath':
self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection':
True,
'de_novo_chimera_detection':
True,
'cluster_size_filtering':
False,
'minlen': 12,
'w': 12,
'minsize': 1,
'percent_id': 0.97,
'percent_id_err': 0.97,
'abundance_skew': 2,
'chimeras_retention':
'intersection'
})
obs = app(self.tmp_seq_filepath2, self.tmp_otu_ref_database)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# just checks that we have the expected group of each
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_usearch_handles_unions(self):
"""UsearchOtuPicker.__call__ returns expected clusters"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
# Should detect and remove chimeric sequences
# during reference-based detection
exp_otu_ids = ['0', '1', '2']
# will retain 'chimera' with union option.
exp_clusters = [['Solemya', 'Solemya_seq2'], ['chimera'],
['usearch_ecoli_seq', 'usearch_ecoli_seq2']
]
app = UsearchReferenceOtuPicker(
params={'save_intermediate_files': False,
'db_filepath':
self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection':
True,
'de_novo_chimera_detection':
True,
'cluster_size_filtering':
False,
'minlen': 12,
'w': 12,
'minsize': 1,
'percent_id': 0.97,
'percent_id_err': 0.97,
'abundance_skew': 2,
'chimeras_retention': 'union'
})
obs = app(self.tmp_seq_filepath2, self.tmp_otu_ref_database)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# just checks that we have the expected group of each
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_writes_output(self):
"""UsearchOtuPicker.__call__ writes expected output clusters file"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
# Should detect and remove chimeric sequences during reference-based
# detection, then write the OTU mapping file in QIIME format.
fd, self.tmp_result_path = mkstemp(
prefix='UsearchOTUMapping_',
suffix='.txt')
close(fd)
f = open(self.tmp_result_path, "w")
fd, self.tmp_failures_path = mkstemp(
prefix='UsearchFailures_',
suffix='.txt')
close(fd)
f = open(self.tmp_failures_path, "w")
self._files_to_remove.append(self.tmp_result_path)
self._files_to_remove.append(self.tmp_failures_path)
app = UsearchReferenceOtuPicker(
params={'save_intermediate_files': False,
'db_filepath':
self.tmp_ref_database,
'output_dir': self.temp_dir,
'remove_usearch_logs': True,
'reference_chimera_detection':
True,
'de_novo_chimera_detection':
False,
'cluster_size_filtering':
False,
'minlen': 12,
'w': 12,
'minsize': 1,
'percent_id': 0.97,
'percent_id_err': 0.97,
'abundance_skew': 2
})
obs = app(self.tmp_seq_filepath2, self.tmp_otu_ref_database,
result_path=self.tmp_result_path,
failure_path=self.tmp_failures_path)
expected_otu_mapping =\
["1\tSolemya\tSolemya_seq2\n",
"0\tusearch_ecoli_seq\tusearch_ecoli_seq2\n"""
]
f = open(self.tmp_result_path, "U")
actual_otu_mapping = f.readlines()
self.assertEqual(actual_otu_mapping, expected_otu_mapping)
expected_failures = ["chimera"]
f = open(self.tmp_failures_path, "U")
actual_failures = f.readlines()
self.assertEqual(actual_failures, expected_failures)
class UclustOtuPickerTests(TestCase):
""" Tests of the uclust-based OTU picker """
def setUp(self):
# create the temporary input files
self.temp_dir = get_qiime_temp_dir()
fd, self.tmp_seq_filepath1 = mkstemp(
dir=self.temp_dir,
prefix='UclustOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath1, 'w')
seq_file.write(dna_seqs_3)
seq_file.close()
fd, self.tmp_seq_filepath2 = mkstemp(
prefix='UclustOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath2, 'w')
seq_file.write(dna_seqs_4)
seq_file.close()
fd, self.tmp_seq_filepath3 = mkstemp(
prefix='UclustOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath3, 'w')
seq_file.write(dna_seqs_5)
seq_file.close()
fd, self.tmp_seq_filepath4 = mkstemp(
prefix='UclustOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath4, 'w')
seq_file.write(dna_seqs_6)
seq_file.close()
self._files_to_remove =\
[self.tmp_seq_filepath1, self.tmp_seq_filepath2,
self.tmp_seq_filepath3, self.tmp_seq_filepath4]
def tearDown(self):
remove_files(self._files_to_remove)
def seqs_to_temp_fasta(self, seqs):
""" """
fd, fp = mkstemp(
prefix='UclustReferenceOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(fp, 'w')
self._files_to_remove.append(fp)
for s in seqs:
seq_file.write('>%s\n%s\n' % s)
seq_file.close()
return fp
def test_toggle_collapse_identical_sequences(self):
"""UclustOtuPicker: toggle prefilter identical seqs doesn't affect clusters
"""
# generate result including prefilter
app_w_collapse_identical =\
UclustOtuPicker(params={'Similarity': 0.90,
'save_uc_files': False,
'prefilter_identical_sequences': True,
'output_dir': self.temp_dir})
result_w_collapse_identical = \
sorted(app_w_collapse_identical(self.tmp_seq_filepath4).values())
# generate result excluding prefilter
app_wo_collapse_identical =\
UclustOtuPicker(params={'Similarity': 0.90,
'save_uc_files': False,
'prefilter_identical_sequences': False,
'output_dir': self.temp_dir})
result_wo_collapse_identical = \
sorted(app_wo_collapse_identical(self.tmp_seq_filepath4).values())
self.assertEqual(result_w_collapse_identical,
result_wo_collapse_identical)
def test_toggle_suppress_sort(self):
"""UclustOtuPicker: togging suppress sort functions as expected
"""
seqs = [('s1', 'ACCTTGTTACTTT'), # three copies
('s2', 'ACCTTGTTACTTTC'), # one copy
('s3', 'ACCTTGTTACTTTCC'), # two copies
('s4', 'ACCTTGTTACTTT'),
('s5', 'ACCTTGTTACTTTCC'),
('s6', 'ACCTTGTTACTTT')]
seqs_fp = self.seqs_to_temp_fasta(seqs)
# no abundance sorting and uclust's sorting enabled
# so length-based sorting
app = UclustOtuPicker(params={'Similarity': 0.80,
'enable_rev_strand_matching': False,
'suppress_sort': False,
'presort_by_abundance': False,
'save_uc_files': False})
obs = app(seqs_fp)
exp = {0: ['s3', 's5', 's2', 's1', 's4', 's6']}
self.assertEqual(obs, exp)
# no abundance sorting and uclust's sorting suppressed,
# so no sorting at all
app = UclustOtuPicker(params={'Similarity': 0.80,
'enable_rev_strand_matching': False,
'suppress_sort': True,
'presort_by_abundance': False,
'save_uc_files': False})
obs = app(seqs_fp)
exp = {0: ['s1', 's4', 's6', 's2', 's3', 's5']}
self.assertEqual(obs, exp)
def test_abundance_sort(self):
"""UclustOtuPicker: abundance sort functions as expected
"""
# enable abundance sorting with suppress sort = False (it gets
# set to True internally, otherwise uclust's length sort would
# override the abundance sorting)
seqs = [('s1 comment1', 'ACCTTGTTACTTT'), # three copies
('s2 comment2', 'ACCTTGTTACTTTC'), # one copy
('s3 comment3', 'ACCTTGTTACTTTCC'), # two copies
('s4 comment4', 'ACCTTGTTACTTT'),
('s5 comment5', 'ACCTTGTTACTTTCC'),
('s6 comment6', 'ACCTTGTTACTTT')]
seqs_fp = self.seqs_to_temp_fasta(seqs)
# abundance sorting changes order
app = UclustOtuPicker(params={'Similarity': 0.80,
'enable_rev_strand_matching': False,
'suppress_sort': False,
'presort_by_abundance': True,
'save_uc_files': False})
obs = app(seqs_fp)
exp = {0: ['s1', 's4', 's6', 's3', 's5', 's2']}
self.assertEqual(obs, exp)
# abundance sorting changes order -- same results with suppress_sort =
# True, because suppress_sort is forced to True when presorting by abundance
app = UclustOtuPicker(params={'Similarity': 0.80,
'enable_rev_strand_matching': False,
'suppress_sort': True,
'presort_by_abundance': True,
'save_uc_files': False})
obs = app(seqs_fp)
exp = {0: ['s1', 's4', 's6', 's3', 's5', 's2']}
self.assertEqual(obs, exp)
def test_call_default_params(self):
"""UclustOtuPicker.__call__ returns expected clusters default params"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
exp_otu_ids = range(10)
exp_clusters = [['uclust_test_seqs_0'],
['uclust_test_seqs_1'],
['uclust_test_seqs_2'],
['uclust_test_seqs_3'],
['uclust_test_seqs_4'],
['uclust_test_seqs_5'],
['uclust_test_seqs_6'],
['uclust_test_seqs_7'],
['uclust_test_seqs_8'],
['uclust_test_seqs_9']]
app = UclustOtuPicker(params={'save_uc_files': False})
obs = app(self.tmp_seq_filepath1)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# we just check that the expected groups of otu ids and clusters are present
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_default_params_suppress_sort(self):
"""UclustOtuPicker.__call__ returns expected clusters default params"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
exp_otu_ids = range(10)
exp_clusters = [['uclust_test_seqs_0'],
['uclust_test_seqs_1'],
['uclust_test_seqs_2'],
['uclust_test_seqs_3'],
['uclust_test_seqs_4'],
['uclust_test_seqs_5'],
['uclust_test_seqs_6'],
['uclust_test_seqs_7'],
['uclust_test_seqs_8'],
['uclust_test_seqs_9']]
app = UclustOtuPicker(params={'save_uc_files': False,
'suppress_sort': True})
obs = app(self.tmp_seq_filepath1)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# we just check that the expected groups of otu ids and clusters are present
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_default_params_save_uc_file(self):
""" returns expected clusters default params, writes correct .uc file"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
exp_otu_ids = range(10)
exp_clusters = [['uclust_test_seqs_0'],
['uclust_test_seqs_1'],
['uclust_test_seqs_2'],
['uclust_test_seqs_3'],
['uclust_test_seqs_4'],
['uclust_test_seqs_5'],
['uclust_test_seqs_6'],
['uclust_test_seqs_7'],
['uclust_test_seqs_8'],
['uclust_test_seqs_9']]
app = UclustOtuPicker(params={'save_uc_files': True,
'output_dir': self.temp_dir})
obs = app(self.tmp_seq_filepath1)
uc_output_fp = self.tmp_seq_filepath1.replace('.fasta', '_clusters.uc')
uc_output_f = open(uc_output_fp, "U")
self._files_to_remove.append(uc_output_fp)
# Test the file content minus the header lines (the tmp filename of the
# sorted fasta file is difficult to access here). The uclust version
# number is also excluded, since it can vary between systems while still
# producing the expected clusters.
uc_result = [line.strip() for line in uc_output_f][2:]
self.assertEqual(uc_result, expected_uc_output)
# Make sure other results are correct with uc file being saved.
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# we just check that the expected groups of otu ids and clusters are present
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_alt_threshold(self):
"""UclustOtuPicker.__call__ returns expected clusters with alt threshold
"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
exp_otu_ids = range(9)
exp_clusters = [['uclust_test_seqs_0'],
['uclust_test_seqs_1'],
['uclust_test_seqs_2'],
['uclust_test_seqs_3'],
['uclust_test_seqs_4'],
['uclust_test_seqs_5'],
['uclust_test_seqs_6', 'uclust_test_seqs_8'],
['uclust_test_seqs_7'],
['uclust_test_seqs_9']]
app = UclustOtuPicker(params={'Similarity': 0.90,
'suppress_sort': False,
'presort_by_abundance': False,
'save_uc_files': False})
obs = app(self.tmp_seq_filepath1)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# we just check that the expected groups of otu ids and clusters are present
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_otu_id_prefix(self):
"""UclustOtuPicker.__call__ returns expected clusters with alt threshold
"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
exp_otu_ids = ['my_otu_%d' % i for i in range(9)]
exp_clusters = [['uclust_test_seqs_0'],
['uclust_test_seqs_1'],
['uclust_test_seqs_2'],
['uclust_test_seqs_3'],
['uclust_test_seqs_4'],
['uclust_test_seqs_5'],
['uclust_test_seqs_6', 'uclust_test_seqs_8'],
['uclust_test_seqs_7'],
['uclust_test_seqs_9']]
app = UclustOtuPicker(params={'Similarity': 0.90,
'suppress_sort': False,
'presort_by_abundance': False,
'new_cluster_identifier': 'my_otu_',
'save_uc_files': False})
obs = app(self.tmp_seq_filepath1)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# we just check that the expected groups of otu ids and clusters are present
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_suppress_sort(self):
"""UclustOtuPicker.__call__ handles suppress sort
"""
exp_otu_ids = range(3)
exp_clusters = [['uclust_test_seqs_0'],
['uclust_test_seqs_1'],
['uclust_test_seqs_2']]
app = UclustOtuPicker(params={'Similarity': 0.90,
'suppress_sort': True,
'optimal': True,
'enable_rev_strand_matching': True,
'save_uc_files': False})
obs = app(self.tmp_seq_filepath2)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# we just check that the expected groups of otu ids and clusters are present
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_call_rev_matching(self):
"""UclustOtuPicker.__call__ handles reverse strand matching
"""
exp_otu_ids = range(2)
exp_clusters = [['uclust_test_seqs_0'], ['uclust_test_seqs_0_rc']]
app = UclustOtuPicker(params={'Similarity': 0.90,
'enable_rev_strand_matching': False,
'suppress_sort': False,
'presort_by_abundance': False,
'save_uc_files': False})
obs = app(self.tmp_seq_filepath3)
obs_otu_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# we just check that the expected groups of otu ids and clusters are present
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
exp = {0: ['uclust_test_seqs_0', 'uclust_test_seqs_0_rc']}
app = UclustOtuPicker(params={'Similarity': 0.90,
'enable_rev_strand_matching': True,
'suppress_sort': False,
'presort_by_abundance': False,
'save_uc_files': False})
obs = app(self.tmp_seq_filepath3)
self.assertEqual(obs, exp)
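# Descriptive note: dna_seqs_5 (written to tmp_seq_filepath3 in setUp) holds a
# sequence and its reverse complement, so they form two OTUs with rev strand
# matching disabled and collapse into a single OTU when it is enabled, as
# asserted above.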
def test_call_output_to_file(self):
"""UclustHitOtuPicker.__call__ output to file functions as expected
"""
fd, tmp_result_filepath = mkstemp(
prefix='UclustOtuPickerTest.test_call_output_to_file_',
suffix='.txt')
close(fd)
app = UclustOtuPicker(params={'Similarity': 0.90,
'suppress_sort': False,
'presort_by_abundance': False,
'save_uc_files': False})
obs = app(self.tmp_seq_filepath1, result_path=tmp_result_filepath)
result_file = open(tmp_result_filepath)
result_file_str = result_file.read()
result_file.close()
# remove the result file before running the assertions, so that if
# they fail the temp file is still cleaned up
remove(tmp_result_filepath)
exp_otu_ids = map(str, range(9))
exp_clusters = [['uclust_test_seqs_0'],
['uclust_test_seqs_1'],
['uclust_test_seqs_2'],
['uclust_test_seqs_3'],
['uclust_test_seqs_4'],
['uclust_test_seqs_5'],
['uclust_test_seqs_6', 'uclust_test_seqs_8'],
['uclust_test_seqs_7'],
['uclust_test_seqs_9']]
obs_otu_ids = []
obs_clusters = []
for line in result_file_str.split('\n'):
if line:
fields = line.split('\t')
obs_otu_ids.append(fields[0])
obs_clusters.append(fields[1:])
obs_otu_ids.sort()
obs_clusters.sort()
# The relation between otu ids and clusters is arbitrary, and
# is not stable due to use of dicts when parsing clusters -- therefore
# we just check that the expected groups of otu ids and clusters are present
self.assertEqual(obs_otu_ids, exp_otu_ids)
self.assertEqual(obs_clusters, exp_clusters)
# confirm that nothing is returned when result_path is specified
self.assertEqual(obs, None)
def test_call_log_file(self):
"""UclustOtuPicker.__call__ writes log when expected
"""
fd, tmp_log_filepath = mkstemp(
prefix='UclustOtuPickerTest.test_call_output_to_file_l_',
suffix='.txt')
close(fd)
fd, tmp_result_filepath = mkstemp(
prefix='UclustOtuPickerTest.test_call_output_to_file_r_',
suffix='.txt')
close(fd)
app = UclustOtuPicker(params={'Similarity': 0.99,
'save_uc_files': False})
obs = app(self.tmp_seq_filepath1,
result_path=tmp_result_filepath, log_path=tmp_log_filepath)
log_file = open(tmp_log_filepath)
log_file_str = log_file.read()
log_file.close()
# remove the temp files before running the assertions, so that if
# they fail the temp files are still cleaned up
remove(tmp_log_filepath)
remove(tmp_result_filepath)
log_file_99_exp = ["UclustOtuPicker parameters:",
"Similarity:0.99", "Application:uclust",
"enable_rev_strand_matching:False",
"suppress_sort:True",
"optimal:False",
'max_accepts:1',
'max_rejects:8',
'stepwords:8',
'word_length:8',
"exact:False",
"Num OTUs:10",
"new_cluster_identifier:None",
"presort_by_abundance:True",
"stable_sort:True",
"output_dir:.",
"save_uc_files:False",
"prefilter_identical_sequences:True",
"Result path: %s" % tmp_result_filepath]
# compare data in log file to the expected log lines
# NOTE: Since app.params is a dict, the order of lines is not
# guaranteed, so the actual and expected lines are compared as
# unordered lists
self.assertItemsEqual(log_file_str.split('\n'), log_file_99_exp)
def test_map_filtered_clusters_to_full_clusters(self):
"""UclustOtuPicker._map_filtered_clusters_to_full_clusters functions as expected
"""
# original and mapped full clusters are the same
app = UclustOtuPicker(params={})
filter_map = {'s1': ['s1'], 's2': ['s2'],
's3': ['s3'], 's4': ['s4'],
's5': ['s5'], 's6': ['s6']}
clusters = [['s1'], ['s2'], ['s3'], ['s4'], ['s5'], ['s6']]
actual = app._map_filtered_clusters_to_full_clusters(
clusters,
filter_map)
expected = clusters
self.assertEqual(actual, expected)
# original and mapped full clusters are not the same
filter_map = {'s1': ['s1', 's2', 's3', 's4'], 's5': ['s5', 's6']}
clusters = [['s1', 's5']]
actual = app._map_filtered_clusters_to_full_clusters(
clusters,
filter_map)
for e in actual:
e.sort()
expected = [['s1', 's2', 's3', 's4', 's5', 's6']]
self.assertEqual(actual, expected)
filter_map = {'s1': ['s1', 's2', 's6'],
's3': ['s3'], 's5': ['s4', 's5']}
clusters = [['s1', 's3'], ['s5']]
actual = app._map_filtered_clusters_to_full_clusters(
clusters,
filter_map)
for e in actual:
e.sort()
expected = [['s1', 's2', 's3', 's6'], ['s4', 's5']]
self.assertEqual(actual, expected)
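# A sketch of the mapping exercised above (illustrative values only): each
# member of a filtered cluster is replaced by the full list of sequence ids
# it represents in filter_map, e.g.
#   filter_map = {'s1': ['s1', 's2'], 's3': ['s3']}
#   [['s1', 's3']]  ->  [['s1', 's2', 's3']]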
class UclustReferenceOtuPickerTests(TestCase):
""" Tests of the uclust reference-based OTU picker """
def setUp(self):
""" """
self.temp_dir = get_qiime_temp_dir()
fd, self.tmp_seq_filepath1 = mkstemp(
dir=self.temp_dir,
prefix='UclustReferenceOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath1, 'w')
seq_file.write(uclustref_query_seqs1)
seq_file.close()
fd, self.temp_ref_filepath1 = mkstemp(
prefix='UclustReferenceOtuPickerTest_',
suffix='.fasta')
close(fd)
ref_file = open(self.temp_ref_filepath1, 'w')
ref_file.write(uclustref_ref_seqs1)
ref_file.close()
self._files_to_remove =\
[self.tmp_seq_filepath1,
self.temp_ref_filepath1]
def tearDown(self):
remove_files(self._files_to_remove)
def seqs_to_temp_fasta(self, seqs):
""" """
fd, fp = mkstemp(
prefix='UclustReferenceOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(fp, 'w')
self._files_to_remove.append(fp)
for s in seqs:
seq_file.write('>%s\n%s\n' % s)
seq_file.close()
return fp
def test_toggle_suppress_sort(self):
"""UclustReferenceOtuPicker: togging suppress sort functions as expected
"""
seqs = [('s1 comment1', 'ACCTTGTTACTTT'), # three copies
('s2 comment2', 'ACCTTGTTACTTTC'), # one copy
('s3 comment3', 'ACCTTGTTACTTTCC'), # two copies
('s4 comment4', 'ACCTTGTTACTTT'),
('s5 comment5', 'ACCTTGTTACTTTCC'),
('s6 comment6', 'ACCTTGTTACTTT')]
seqs_fp = self.seqs_to_temp_fasta(seqs)
ref_seqs = [('r1 blah', 'ACCTTGTTACTTT')]
ref_seqs_fp = self.seqs_to_temp_fasta(ref_seqs)
# no abundance sorting and uclust's sorting enabled
# so length-based sorting
app = UclustReferenceOtuPicker(params={'Similarity': 0.80,
'enable_rev_strand_matching':
False,
'suppress_sort': False,
'presort_by_abundance': False,
'save_uc_files': False})
obs = app(seqs_fp, ref_seqs_fp)
exp = {'r1': ['s3', 's5', 's2', 's1', 's4', 's6']}
self.assertEqual(obs, exp)
# no abundance sorting and uclust's sorting suppressed,
# so no sorting at all
app = UclustReferenceOtuPicker(params={'Similarity': 0.80,
'enable_rev_strand_matching':
False,
'suppress_sort': True,
'presort_by_abundance': False,
'save_uc_files': False})
obs = app(seqs_fp, ref_seqs_fp)
exp = {'r1': ['s1', 's4', 's6', 's2', 's3', 's5']}
self.assertEqual(obs, exp)
def test_abundance_sort(self):
"""UclustReferenceOtuPicker: abundance sort functions as expected
"""
# enable abundance sorting with suppress sort = False (it gets
# set to True internally, otherwise uclust's length sort would
# override the abundance sorting)
seqs = [('s1 comment1', 'ACCTTGTTACTTT'), # three copies
('s2 comment2', 'ACCTTGTTACTTTC'), # one copy
('s3 comment3', 'ACCTTGTTACTTTCC'), # two copies
('s4 comment4', 'ACCTTGTTACTTT'),
('s5 comment5', 'ACCTTGTTACTTTCC'),
('s6 comment6', 'ACCTTGTTACTTT')]
seqs_fp = self.seqs_to_temp_fasta(seqs)
ref_seqs = [('r1 blah', 'ACCTTGTTACTTT')]
ref_seqs_fp = self.seqs_to_temp_fasta(ref_seqs)
# abundance sorting changes order
app = UclustReferenceOtuPicker(params={'Similarity': 0.80,
'enable_rev_strand_matching':
False,
'suppress_sort': True,
'presort_by_abundance': True,
'save_uc_files': False})
obs = app(seqs_fp, ref_seqs_fp)
exp = {'r1': ['s1', 's4', 's6', 's3', 's5', 's2']}
self.assertEqual(obs, exp)
# abundance sorting changes order -- same results with suppress_sort =
# True, because suppress_sort is forced to True when presorting by abundance
app = UclustReferenceOtuPicker(params={'Similarity': 0.80,
'enable_rev_strand_matching':
False,
'suppress_sort': True,
'presort_by_abundance': True,
'save_uc_files': False})
obs = app(seqs_fp, ref_seqs_fp)
exp = {'r1': ['s1', 's4', 's6', 's3', 's5', 's2']}
self.assertEqual(obs, exp)
def test_toggle_suppress_new_clusters(self):
"""UclustReferenceOtuPicker: toggle suppress new clusters
"""
seqs = [('s1 a', 'ACCTTGTTACTTT'),
('s2 bb', 'ACCTAGTTACTTT'),
('s3 c c', 'TTGCGTAACGTTTGAC')]
ref_seqs = [
('r1 d', 'ACCTCGTTACTTT')]
# these seqs should match at 0.90, but don't -- I confirmed this by
# running uclust directly, and have contacted Robert Edgar for
# clarification
uc = UclustReferenceOtuPicker({'Similarity': 0.80,
'new_cluster_identifier': 'new_',
'next_new_cluster_number': 42,
'suppress_new_clusters': True,
'save_uc_files': False})
obs = uc(self.seqs_to_temp_fasta(seqs),
self.seqs_to_temp_fasta(ref_seqs), HALT_EXEC=False)
exp = {'r1': ['s1', 's2']}
self.assertEqual(obs, exp)
# now allow new clusters so the independently-clustering seq forms one
uc = UclustReferenceOtuPicker({'Similarity': 0.80,
'new_cluster_identifier': 'new_',
'next_new_cluster_number': 42,
'suppress_new_clusters': False,
'save_uc_files': False})
obs = uc(self.seqs_to_temp_fasta(seqs),
self.seqs_to_temp_fasta(ref_seqs), HALT_EXEC=False)
exp = {'r1': ['s1', 's2'], 'new_42': ['s3']}
self.assertEqual(obs, exp)
def test_toggle_collapse_identical_sequences_prefilter_w_new_clusters(
self):
"""UclustReferenceOtuPicker: ident. seqs prefilter fns w new clusters
"""
# s4 == s2 and s3 == s5
seqs = [('s1 a', 'ACCTTGTTACTTT'),
('s2 bb', 'ACCTAGTTACTTT'),
('s4 bb', 'ACCTAGTTACTTT'),
('s3 c c', 'TTGCGTAACGTTTGAC'),
('s5 c c', 'TTGCGTAACGTTTGAC')]
ref_seqs = [
('r1 d', 'ACCTCGTTACTTT')]
exp = {'r1': ['s2', 's4', 's1'],
'new_42': ['s3', 's5']}
# cluster with the identical-sequence prefilter disabled
uc = UclustReferenceOtuPicker({'Similarity': 0.80,
'new_cluster_identifier': 'new_',
'next_new_cluster_number': 42,
'suppress_new_clusters': False,
'save_uc_files': False,
'prefilter_identical_sequences': False})
obs_no_prefilter = uc(self.seqs_to_temp_fasta(seqs),
self.seqs_to_temp_fasta(ref_seqs), HALT_EXEC=False)
self.assertEqual(obs_no_prefilter, exp)
uc = UclustReferenceOtuPicker({'Similarity': 0.80,
'new_cluster_identifier': 'new_',
'next_new_cluster_number': 42,
'suppress_new_clusters': False,
'save_uc_files': False,
'prefilter_identical_sequences': True})
obs_prefilter = uc(self.seqs_to_temp_fasta(seqs),
self.seqs_to_temp_fasta(ref_seqs), HALT_EXEC=False)
self.assertEqual(obs_prefilter, exp)
# a little paranoia never hurt anyone
self.assertEqual(obs_prefilter, obs_no_prefilter)
def test_toggle_collapse_identical_sequences_prefilter_wo_new_clusters(
self):
"""UclustReferenceOtuPicker: ident. seqs prefilter fns wo new clusters
"""
# s4 == s2 and s3 == s5
seqs = [('s1 a', 'ACCTTGTTACTTT'),
('s2 bb', 'ACCTAGTTACTTT'),
('s4 bb', 'ACCTAGTTACTTT'),
('s3 c c', 'TTGCGTAACGTTTGAC'),
('s5 c c', 'TTGCGTAACGTTTGAC')]
ref_seqs = [
('r1 d', 'ACCTCGTTACTTT')]
exp = {'r1': ['s2', 's4', 's1']}
# cluster with new clusters suppressed and the prefilter disabled
uc = UclustReferenceOtuPicker({'Similarity': 0.80,
'new_cluster_identifier': 'new_',
'next_new_cluster_number': 42,
'suppress_new_clusters': True,
'save_uc_files': False,
'prefilter_identical_sequences': False})
fd, fail_path_no_prefilter = mkstemp(
prefix='UclustRefOtuPickerFailures', suffix='.txt')
close(fd)
self._files_to_remove.append(fail_path_no_prefilter)
obs_no_prefilter = uc(self.seqs_to_temp_fasta(seqs),
self.seqs_to_temp_fasta(ref_seqs),
failure_path=fail_path_no_prefilter,
HALT_EXEC=False)
self.assertEqual(obs_no_prefilter, exp)
self.assertEqual(open(fail_path_no_prefilter).read(),
"s3\ns5")
uc = UclustReferenceOtuPicker({'Similarity': 0.80,
'new_cluster_identifier': 'new_',
'next_new_cluster_number': 42,
'suppress_new_clusters': True,
'save_uc_files': False,
'prefilter_identical_sequences': True})
fd, fail_path_prefilter = mkstemp(
prefix='UclustRefOtuPickerFailures', suffix='.txt')
close(fd)
self._files_to_remove.append(fail_path_prefilter)
obs_prefilter = uc(self.seqs_to_temp_fasta(seqs),
self.seqs_to_temp_fasta(ref_seqs),
failure_path=fail_path_prefilter,
HALT_EXEC=False)
self.assertEqual(obs_prefilter, exp)
self.assertEqual(open(fail_path_prefilter).read(),
"s3\ns5")
# a little paranoia never hurt anyone
self.assertEqual(obs_prefilter, obs_no_prefilter)
self.assertEqual(open(fail_path_prefilter).read(),
open(fail_path_no_prefilter).read())
def test_varied_similarity(self):
"""UclustReferenceOtuPicker: varying similarity affects clustering
"""
seqs = [('s1', 'ACCTTGTTACTTT'),
('s2', 'ACCTAGTTACTTT')]
ref_seqs = [
('r1', 'ACCTCGTTACTTT')]
# these seqs should match at 0.90, but don't -- I confirmed this by
# running uclust directly, and have contacted Robert Edgar for
# clarification
uc = UclustReferenceOtuPicker({'Similarity': 0.80,
'new_cluster_identifier': 'new_',
'next_new_cluster_number': 42,
'suppress_new_clusters': False,
'save_uc_files': False})
obs = uc(self.seqs_to_temp_fasta(seqs),
self.seqs_to_temp_fasta(ref_seqs), HALT_EXEC=False)
exp = {'r1': ['s1', 's2']}
self.assertEqual(obs, exp)
# set similarity to 100%
uc = UclustReferenceOtuPicker({'Similarity': 1.0,
'suppress_new_clusters': False,
'save_uc_files': False})
obs = uc(self.seqs_to_temp_fasta(seqs),
self.seqs_to_temp_fasta(ref_seqs), HALT_EXEC=False)
# testing is harder for new clusters, since the otu identifiers are
# arbitrary, and otu identifier assignment is based on order of
# iteration over a dict
exp1 = {'QiimeOTU1': ['s1'], 'QiimeOTU2': ['s2']}
exp2 = {'QiimeOTU2': ['s1'], 'QiimeOTU1': ['s2']}
self.assertTrue(obs == exp1 or obs == exp2)
def test_toggle_rev_strand_matching(self):
"""UclustReferenceOtuPicker: toggle rev strand matching
"""
# s3 and s4 are rc of one another
seqs = [('s1', 'ACCTTGTTACTTT'),
('s2', 'ACCTAGTTACTTT'),
('s3', 'TTGCGTAACGTTTGAC'),
('s4', 'GTCAAACGTTACGCAA')]
ref_seqs = [
('r1', 'ACCTCGTTACTTT')]
# rev strand matching disabled
uc = UclustReferenceOtuPicker({'Similarity': 0.80,
'new_cluster_identifier': 'new_',
'next_new_cluster_number': 42,
'enable_rev_strand_matching': False,
'save_uc_files': False})
obs = uc(self.seqs_to_temp_fasta(seqs),
self.seqs_to_temp_fasta(ref_seqs), HALT_EXEC=False)
exp = {'r1': ['s1', 's2'], 'new_42': ['s3'], 'new_43': ['s4']}
self.assertEqual(obs, exp)
# enable rev strand matching
uc = UclustReferenceOtuPicker({'Similarity': 0.80,
'new_cluster_identifier': 'new_',
'next_new_cluster_number': 42,
'enable_rev_strand_matching': True,
'save_uc_files': False})
obs = uc(self.seqs_to_temp_fasta(seqs),
self.seqs_to_temp_fasta(ref_seqs), HALT_EXEC=False)
exp = {'r1': ['s1', 's2'], 'new_42': ['s3', 's4']}
self.assertEqual(obs, exp)
def test_call_log_file(self):
"""UclustReferenceOtuPicker.__call__ writes log when expected
"""
fd, tmp_log_filepath = mkstemp(prefix='UclustReferenceOtuPicker',
suffix='log')
close(fd)
fd, tmp_result_filepath = mkstemp(
prefix='UclustReferenceOtuPicker',
suffix='txt')
close(fd)
fd, tmp_failure_filepath = mkstemp(
prefix='UclustReferenceOtuPicker',
suffix='txt')
close(fd)
seqs = [('s1', 'ACCTTGTTACTTT'),
('s2', 'ACCTAGTTACTTT'),
('s3', 'TTGCGTAACGTTTGAC'),
('s4', 'GTCAAACGTTACGCAA')]
ref_seqs = [
('r1', 'ACCTCGTTACTTT')]
# rev strand matching disabled
uc = UclustReferenceOtuPicker({'Similarity': 0.8,
'suppress_new_clusters': True,
'save_uc_files': False,
'suppress_sort': True})
ref_seqs_fp = self.seqs_to_temp_fasta(ref_seqs)
obs = uc(self.seqs_to_temp_fasta(seqs),
ref_seqs_fp,
result_path=tmp_result_filepath,
log_path=tmp_log_filepath,
failure_path=tmp_failure_filepath)
log_file = open(tmp_log_filepath)
log_file_str = log_file.read()
log_file.close()
fail_file = open(tmp_failure_filepath)
fail_file_str = fail_file.read()
fail_file.close()
# remove the temp files before running the assertions, so that if
# they fail the temp files are still cleaned up
remove(tmp_log_filepath)
remove(tmp_result_filepath)
remove(tmp_failure_filepath)
log_file_99_exp = ["OtuPicker parameters:",
"Reference seqs:%s" % abspath(ref_seqs_fp),
"Similarity:0.8",
"Application:uclust",
"enable_rev_strand_matching:False",
"suppress_sort:True",
"suppress_new_clusters:True",
"optimal:False",
"exact:False",
"Num OTUs:1",
"Num new OTUs:0",
"Num failures:2",
'max_accepts:1',
'max_rejects:8',
'stepwords:8',
'word_length:8',
"stable_sort:True",
"new_cluster_identifier:QiimeOTU",
"next_new_cluster_number:1",
"presort_by_abundance:True",
'save_uc_files:False',
'output_dir:.',
'prefilter_identical_sequences:True',
"Result path: %s" % tmp_result_filepath]
# compare data in log file to the expected log lines
# NOTE: Since app.params is a dict, the order of lines is not
# guaranteed, so the actual and expected lines are compared as
# unordered lists
self.assertItemsEqual(log_file_str.split('\n'), log_file_99_exp)
failures_file_99_exp = ["s3", "s4"]
self.assertItemsEqual(fail_file_str.split('\n'), failures_file_99_exp)
def test_default_parameters_new_clusters_allowed(self):
"""UclustReferenceOtuPicker: default parameters, new clusters allowed
"""
uc = UclustReferenceOtuPicker({'save_uc_files': False})
obs = uc(self.tmp_seq_filepath1, self.temp_ref_filepath1)
exp = {'ref1': ['uclust_test_seqs_0'],
'ref2': ['uclust_test_seqs_1'],
'ref3': ['uclust_test_seqs_2'],
'ref4': ['uclust_test_seqs_3'],
'QiimeOTU1': ['uclust_test_seqs_4'],
'QiimeOTU2': ['uclust_test_seqs_5'],
'QiimeOTU3': ['uclust_test_seqs_6'],
'QiimeOTU4': ['uclust_test_seqs_7'],
'QiimeOTU5': ['uclust_test_seqs_8'],
'QiimeOTU6': ['uclust_test_seqs_9']}
# expected number of clusters observed
self.assertEqual(len(obs), len(exp))
expected_ref_hits = ['ref1', 'ref2', 'ref3', 'ref4']
for k in expected_ref_hits:
# seqs that hit refs should have same otu_id and cluster
self.assertEqual(obs[k], exp[k])
# testing is harder for new clusters, since the otu identifiers are
# arbitrary, and otu identifier assignment is based on order of
# iteration over a dict
exp_cluster_ids = sorted(exp.keys())
exp_clusters = sorted(exp.values())
obs_cluster_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
self.assertEqual(obs_cluster_ids, exp_cluster_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_default_parameters_new_clusters_allowed_save_uc_files(self):
"""UclustReferenceOtuPicker: default parameters, saves uc file
"""
uc = UclustReferenceOtuPicker({'save_uc_files': True,
'output_dir': self.temp_dir,
'suppress_sort': True})
obs = uc(self.tmp_seq_filepath1, self.temp_ref_filepath1)
exp = {'ref1': ['uclust_test_seqs_0'],
'ref2': ['uclust_test_seqs_1'],
'ref3': ['uclust_test_seqs_2'],
'ref4': ['uclust_test_seqs_3'],
'QiimeOTU1': ['uclust_test_seqs_4'],
'QiimeOTU2': ['uclust_test_seqs_5'],
'QiimeOTU3': ['uclust_test_seqs_6'],
'QiimeOTU4': ['uclust_test_seqs_7'],
'QiimeOTU5': ['uclust_test_seqs_8'],
'QiimeOTU6': ['uclust_test_seqs_9']}
# expected number of clusters observed
self.assertEqual(len(obs), len(exp))
expected_ref_hits = ['ref1', 'ref2', 'ref3', 'ref4']
for k in expected_ref_hits:
# seqs that hit refs should have same otu_id and cluster
self.assertEqual(obs[k], exp[k])
# testing is harder for new clusters, since the otu identifiers are
# arbitrary, and otu identifier assignment is based on order of
# iteration over a dict
exp_cluster_ids = sorted(exp.keys())
exp_clusters = sorted(exp.values())
obs_cluster_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
self.assertEqual(obs_cluster_ids, exp_cluster_ids)
self.assertEqual(obs_clusters, exp_clusters)
uc_output_fp = self.tmp_seq_filepath1.replace('.fasta', '_clusters.uc')
uc_output_f = open(uc_output_fp, "U")
self._files_to_remove.append(uc_output_fp)
# Test the file content minus the header lines (the tmp filename of the
# sorted fasta file is difficult to access here) and the second line,
# which can vary slightly between uclust versions while still producing
# correct clusters.
uc_result = [line.strip() for line in uc_output_f][2:]
self.assertEqual(uc_result, expected_ref_uc_file)
def test_alt_similarity_new_clusters_allowed(self):
"""UclustReferenceOtuPicker: alt parameters, new clusters allowed
"""
uc = UclustReferenceOtuPicker({'Similarity': 0.90,
'suppress_sort': False,
'presort_by_abundance': False,
'save_uc_files': False,
'output_dir': self.temp_dir})
obs = uc(self.tmp_seq_filepath1, self.temp_ref_filepath1)
exp = {'ref1': ['uclust_test_seqs_0'],
'ref2': ['uclust_test_seqs_1'],
'ref3': ['uclust_test_seqs_2'],
'ref4': ['uclust_test_seqs_3'],
'QiimeOTU1': ['uclust_test_seqs_4'],
'QiimeOTU2': ['uclust_test_seqs_5'],
'QiimeOTU3': ['uclust_test_seqs_6', 'uclust_test_seqs_8'],
'QiimeOTU4': ['uclust_test_seqs_7'],
'QiimeOTU5': ['uclust_test_seqs_9']}
# expected number of clusters observed
self.assertEqual(len(obs), len(exp))
expected_ref_hits = ['ref1', 'ref2', 'ref3', 'ref4']
for k in expected_ref_hits:
# seqs that hit refs should have same otu_id and cluster
self.assertEqual(obs[k], exp[k])
# testing is harder for new clusters, since the otu identifiers are
# arbitrary, and otu identifier assignment is based on order of
# iteration over a dict
exp_cluster_ids = sorted(exp.keys())
exp_clusters = sorted(exp.values())
obs_cluster_ids = sorted(obs.keys())
obs_clusters = sorted(obs.values())
self.assertEqual(obs_cluster_ids, exp_cluster_ids)
self.assertEqual(obs_clusters, exp_clusters)
def test_default_parameters_new_clusters_disallowed(self):
"""UclustReferenceOtuPicker: default params, new clusters not allowed
"""
uc = UclustReferenceOtuPicker({'suppress_new_clusters': True,
'save_uc_files': False})
obs = uc(self.tmp_seq_filepath1, self.temp_ref_filepath1)
exp = {'ref1': ['uclust_test_seqs_0'],
'ref2': ['uclust_test_seqs_1'],
'ref3': ['uclust_test_seqs_2'],
'ref4': ['uclust_test_seqs_3']}
# observed clusters match expected exactly
self.assertEqual(obs, exp)
def test_alt_parameters_new_clusters_disallowed(self):
"""UclustReferenceOtuPicker: alt params, new clusters not allowed
"""
uc = UclustReferenceOtuPicker({'suppress_new_clusters': True,
'Similarity': 1.0,
'save_uc_files': False})
obs = uc(self.tmp_seq_filepath1, self.temp_ref_filepath1)
exp = {'ref3': ['uclust_test_seqs_2'], 'ref4': ['uclust_test_seqs_3']}
# observed clusters match expected exactly
self.assertEqual(obs, exp)
class CdHitOtuPickerTests(TestCase):
""" Tests of the cd-hit-based OTU picker """
def setUp(self):
# create the temporary input files
fd, self.tmp_seq_filepath1 = mkstemp(
prefix='CdHitOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath1, 'w')
seq_file.write(dna_seqs_1)
seq_file.close()
fd, self.tmp_seq_filepath2 = mkstemp(
prefix='CdHitOtuPickerTest_',
suffix='.fasta')
close(fd)
seq_file = open(self.tmp_seq_filepath2, 'w')
seq_file.write(dna_seqs_2)
seq_file.close()
self._files_to_remove =\
[self.tmp_seq_filepath1, self.tmp_seq_filepath2]
def tearDown(self):
remove_files(self._files_to_remove)
def test_call_default_params(self):
"""CdHitOtuPicker.__call__ returns expected clusters default params"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
exp = {0: ['cdhit_test_seqs_0'],
1: ['cdhit_test_seqs_1'],
2: ['cdhit_test_seqs_2'],
3: ['cdhit_test_seqs_3'],
4: ['cdhit_test_seqs_4'],
5: ['cdhit_test_seqs_5'],
6: ['cdhit_test_seqs_6'],
7: ['cdhit_test_seqs_7'],
8: ['cdhit_test_seqs_8'],
9: ['cdhit_test_seqs_9']}
app = CdHitOtuPicker(params={})
obs = app(self.tmp_seq_filepath1)
self.assertEqual(obs, exp)
def test_call_alt_threshold(self):
"""CdHitOtuPicker.__call__ returns expected clusters with alt threshold
"""
# adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs
exp = {0: ['cdhit_test_seqs_0'],
1: ['cdhit_test_seqs_1'],
2: ['cdhit_test_seqs_2'],
3: ['cdhit_test_seqs_3'],
4: ['cdhit_test_seqs_4'],
5: ['cdhit_test_seqs_5'],
6: ['cdhit_test_seqs_6', 'cdhit_test_seqs_8'],
7: ['cdhit_test_seqs_7'],
8: ['cdhit_test_seqs_9']}
app = CdHitOtuPicker(params={'Similarity': 0.90})
obs = app(self.tmp_seq_filepath1)
self.assertEqual(obs, exp)
def test_call_output_to_file(self):
"""CdHitOtuPicker.__call__ output to file functions as expected
"""
fd, tmp_result_filepath = mkstemp(
prefix='CdHitOtuPickerTest.test_call_output_to_file_',
suffix='.txt')
close(fd)
app = CdHitOtuPicker(params={'Similarity': 0.90})
obs = app(self.tmp_seq_filepath1, result_path=tmp_result_filepath)
result_file = open(tmp_result_filepath)
result_file_str = result_file.read()
result_file.close()
# remove the result file before running the assertions, so that if
# they fail the temp file is still cleaned up
remove(tmp_result_filepath)
# compare data in result file to the expected file contents
self.assertEqual(result_file_str, dna_seqs_result_file_90_exp)
# confirm that nothing is returned when result_path is specified
self.assertEqual(obs, None)
def test_call_log_file(self):
"""CdHitOtuPicker.__call__ writes log when expected
"""
fd, tmp_log_filepath = mkstemp(
prefix='CdHitOtuPickerTest.test_call_output_to_file_l_',
suffix='.txt')
close(fd)
fd, tmp_result_filepath = mkstemp(
prefix='CdHitOtuPickerTest.test_call_output_to_file_r_',
suffix='.txt')
close(fd)
app = CdHitOtuPicker(params={'Similarity': 0.99})
obs = app(self.tmp_seq_filepath1,
result_path=tmp_result_filepath, log_path=tmp_log_filepath)
log_file = open(tmp_log_filepath)
log_file_str = log_file.read()
log_file.close()
# remove the temp files before running the assertions, so that if
# they fail the temp files are still cleaned up
remove(tmp_log_filepath)
remove(tmp_result_filepath)
log_file_99_exp = ["CdHitOtuPicker parameters:",
"Similarity:0.99", "Application:cdhit",
'Algorithm:cdhit: "longest-sequence-first list removal algorithm"',
'No prefix-based prefiltering.',
"Result path: %s" % tmp_result_filepath]
# compare data in log file to the expected log lines
# NOTE: Since app.params is a dict, the order of lines is not
# guaranteed, so the actual and expected lines are compared as
# unordered lists
self.assertItemsEqual(log_file_str.split('\n'), log_file_99_exp)
def test_prefilter_exact_prefixes_no_filtering(self):
""" CdHitOtuPicker._prefilter_exact_prefixes fns as expected when no seqs get filtered
"""
app = CdHitOtuPicker(params={})
seqs = [('s1', 'ACGTAA'),
('s2', 'ACGTACAA'),
('s3', 'ACGTAG'),
('s4', 'ACGTAT'),
('s5', 'ACGTCAA'),
('s6', 'ACGTCCAAAAAAAAAAAA')]
prefix_length = 6
actual = app._prefilter_exact_prefixes(seqs, prefix_length)
actual[0].sort()
expected = seqs, {'s1': ['s1'], 's2': ['s2'],
's3': ['s3'], 's4': ['s4'],
's5': ['s5'], 's6': ['s6']}
self.assertEqual(actual, expected)
# same result if prefix_length is too long
app = CdHitOtuPicker(params={})
seqs = [('s1', 'ACGTAA'),
('s2', 'ACGTACAA'),
('s3', 'ACGTAG'),
('s4', 'ACGTAT'),
('s5', 'ACGTCAA'),
('s6', 'ACGTCCAAAAAAAAAAAA')]
prefix_length = 42
actual = app._prefilter_exact_prefixes(seqs, prefix_length)
actual[0].sort()
expected = seqs, {'s1': ['s1'], 's2': ['s2'],
's3': ['s3'], 's4': ['s4'],
's5': ['s5'], 's6': ['s6']}
self.assertEqual(actual, expected)
def test_prefilter_exact_prefixes_all_to_one_filtering(self):
""" CdHitOtuPicker._prefilter_exact_prefixes fns as expected when all seqs map to one
"""
# maps to first when all are same length
app = CdHitOtuPicker(params={})
seqs = [('s1 comment', 'ACGTAA'),
('s2', 'ACGTAC'),
('s3', 'ACGTAG'),
('s4', 'ACGTAT'),
('s5', 'ACGTCA'),
('s6', 'ACGTCC')]
prefix_length = 4
actual = app._prefilter_exact_prefixes(seqs, prefix_length)
actual[0].sort()
expected = [('s1', 'ACGTAA')], {'s1':
['s1', 's2', 's3', 's4', 's5', 's6']}
self.assertEqual(actual, expected)
# maps to longest seq
app = CdHitOtuPicker(params={})
seqs = [('s1', 'ACGTAA'),
('s2', 'ACGTACA'),
('s3', 'ACGTAG'),
('s4', 'ACGTAT'),
('s5', 'ACGTCA'),
('s6', 'ACGTCC')]
prefix_length = 4
actual = app._prefilter_exact_prefixes(seqs, prefix_length)
actual[0].sort()
expected = [('s2', 'ACGTACA')], {'s2':
['s1', 's2', 's3', 's4', 's5', 's6']}
self.assertEqual(actual, expected)
# maps to longest seq
app = CdHitOtuPicker(params={})
seqs = [('s1', 'ACGTAA'),
('s2', 'ACGTACA'),
('s3', 'ACGTAGAA'),
('s4', 'ACGTATAAA'),
('s5', 'ACGTCAAAAA'),
('s6', 'ACGTCCAAAAA')]
prefix_length = 4
actual = app._prefilter_exact_prefixes(seqs, prefix_length)
actual[0].sort()
expected = [('s6', 'ACGTCCAAAAA')
], {'s6': ['s1', 's2', 's3', 's4', 's5', 's6']}
self.assertEqual(actual, expected)
def test_prefilter_exact_prefixes_filtering(self):
""" CdHitOtuPicker._prefilter_exact_prefixes fns as expected when filtering occurs
"""
# maps to first when all are same length
app = CdHitOtuPicker(params={})
seqs = [('s1', 'ACGTAA'),
('s2', 'ACGTAC'),
('s3', 'ACGTAG'),
('s4', 'ACGTAT'),
('s5', 'ACGTCA'),
('s6', 'ACGTCC')]
prefix_length = 5
actual = app._prefilter_exact_prefixes(seqs, prefix_length)
actual[0].sort()
expected = [('s1', 'ACGTAA'), ('s5', 'ACGTCA')], \
{'s1': ['s1', 's2', 's3', 's4'], 's5': ['s5', 's6']}
self.assertEqual(actual, expected)
# maps to first when all are same length
app = CdHitOtuPicker(params={})
seqs = [('s1', 'ACGTAA'),
('s2', 'ACGTAC'),
('s3', 'ACGTAGAAAA'),
('s4', 'ACGTAT'),
('s5', 'ACGTCA'),
('s6', 'ACGTCC')]
prefix_length = 5
actual = app._prefilter_exact_prefixes(seqs, prefix_length)
actual[0].sort()
expected = [('s3', 'ACGTAGAAAA'), ('s5', 'ACGTCA')], \
{'s3': ['s1', 's2', 's3', 's4'], 's5': ['s5', 's6']}
self.assertEqual(actual, expected)
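# Note on the representative choice exercised above (inferred from these
# tests rather than independent documentation): when several sequences share
# an exact prefix, the longest sequence in the group is kept as the
# representative and the others are recorded under its id in the filter map.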
def test_map_filtered_clusters_to_full_clusters(self):
"""CdHitOtuPicker._map_filtered_clusters_to_full_clusters functions as expected
"""
# original and mapped full clusters are the same
app = CdHitOtuPicker(params={})
filter_map = {'s1': ['s1'], 's2': ['s2'],
's3': ['s3'], 's4': ['s4'],
's5': ['s5'], 's6': ['s6']}
clusters = [['s1'], ['s2'], ['s3'], ['s4'], ['s5'], ['s6']]
actual = app._map_filtered_clusters_to_full_clusters(
clusters,
filter_map)
expected = clusters
self.assertEqual(actual, expected)
# original and mapped full clusters are not the same
filter_map = {'s1': ['s1', 's2', 's3', 's4'], 's5': ['s5', 's6']}
clusters = [['s1', 's5']]
actual = app._map_filtered_clusters_to_full_clusters(
clusters,
filter_map)
for e in actual:
e.sort()
expected = [['s1', 's2', 's3', 's4', 's5', 's6']]
self.assertEqual(actual, expected)
filter_map = {'s1': ['s1', 's2', 's6'],
's3': ['s3'], 's5': ['s4', 's5']}
clusters = [['s1', 's3'], ['s5']]
actual = app._map_filtered_clusters_to_full_clusters(
clusters,
filter_map)
for e in actual:
e.sort()
expected = [['s1', 's2', 's3', 's6'], ['s4', 's5']]
self.assertEqual(actual, expected)
def test_call_prefilters_when_requested(self):
""" CdHitOtuPicker.__call__ prefilters when requested
"""
# no pre-filtering results in one cluster per sequence as they all
# differ at their 3' ends
app = CdHitOtuPicker(params={'Similarity': 0.99})
self.assertEqual(app(self.tmp_seq_filepath2), dna_seqs_2_result)
# pre-filtering on the first 5 bases collapses everything into a single
# cluster, as the sequences are all identical at their 5' ends
app = CdHitOtuPicker(params={'Similarity': 0.99})
self.assertEqual(
app(self.tmp_seq_filepath2, prefix_prefilter_length=5),
dna_seqs_2_result_prefilter)
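# Descriptive note: all three sequences in dna_seqs_2 are identical over
# their first five bases, so the prefix_prefilter_length=5 call above
# collapses them into a single cluster, while the unfiltered call keeps one
# cluster per sequence because they diverge at their 3' ends.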
class PickOtusStandaloneFunctions(TestCase):
""" Tests of stand-alone functions in pick_otus.py """
def setUp(self):
"""
"""
self.otu_map1 = {'0': ['seq1', 'seq2', 'seq5'],
'1': ['seq3', 'seq4'],
'2': ['seq6', 'seq7', 'seq8']}
self.otu_map2 = {'110': ['0', '2'],
'221': ['1']}
self.otu_map3 = {'a': ['110', '221']}
self.otu_map1_file = ['0\tseq1\tseq2\tseq5',
'1\tseq3\tseq4',
'2\tseq6\tseq7\tseq8']
self.otu_map2_file = ['110\t0\t2',
'221\t1']
self.otu_map3_file = ['a\t110\t221']
self.failures1 = ['110']
self.failures2 = ['110\n', '221']
self.failures3 = ['a']
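# Fixture layout (descriptive note): otu_map1 maps first-round OTU ids to
# sequence ids, otu_map2 maps second-round OTU ids to first-round ids, and
# otu_map3 maps a third-round id to second-round ids. The *_file variants
# hold the same maps as tab-separated lines, and failures1-3 list OTU ids
# treated as failures at the later picking rounds.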
def test_expand_failures_one_otu_map(self):
"""expanding failures generated by chained otu picking fns as expected
"""
expected_f1 = ['0', '2']
self.assertItemsEqual(expand_failures(self.failures1, self.otu_map2),
expected_f1)
expected_f2 = ['0', '1', '2']
self.assertItemsEqual(expand_failures(self.failures2, self.otu_map2),
expected_f2)
def test_expand_failures_two_otu_maps(self):
"""expanding failures generated by chained otu picking fns as expected
"""
expected_f1 = ['seq1', 'seq2', 'seq5', 'seq6', 'seq7', 'seq8']
actual = expand_failures(self.failures1,
expand_otu_map_seq_ids(self.otu_map2, self.otu_map1))
self.assertItemsEqual(actual, expected_f1)
def test_map_otu_map_files_failures_file_two_otu_maps1(self):
"""map_otu_map_files: correctly maps two otu files and failures
"""
exp = ['seq1', 'seq2', 'seq5', 'seq6', 'seq7', 'seq8']
actual = map_otu_map_files(
[self.otu_map1_file, self.otu_map2_file],
self.failures1)
self.assertEqual(actual, exp)
def test_map_otu_map_files_failures_file_two_otu_maps2(self):
"""map_otu_map_files: correctly maps two otu files and failures (alt failures)
"""
exp = ['seq1', 'seq2', 'seq5', 'seq6', 'seq7', 'seq8', 'seq3', 'seq4']
actual = map_otu_map_files(
[self.otu_map1_file, self.otu_map2_file],
self.failures2)
self.assertEqual(actual, exp)
def test_map_otu_map_files_failures_file_three_otu_maps(self):
"""map_otu_map_files: correctly maps three otu files and failures
"""
exp = ['seq1', 'seq2', 'seq5', 'seq6', 'seq7', 'seq8', 'seq3', 'seq4']
actual = map_otu_map_files(
[self.otu_map1_file, self.otu_map2_file, self.otu_map3_file],
self.failures3)
self.assertEqual(actual, exp)
def test_expand_otu_map_seq_ids_error(self):
"""expand_otu_map_seq_ids: error on missing seq_ids
"""
self.assertRaises(KeyError, expand_otu_map_seq_ids,
self.otu_map3, self.otu_map1)
def test_expand_otu_map_seq_ids_two(self):
"""expand_otu_map_seq_ids: correctly maps seq_ids from two otu maps
"""
exp12 = {'110': ['seq1', 'seq2', 'seq5', 'seq6', 'seq7', 'seq8'],
'221': ['seq3', 'seq4']}
actual12 = expand_otu_map_seq_ids(self.otu_map2, self.otu_map1)
self.assertEqual(actual12, exp12)
def test_expand_otu_map_seq_ids_three(self):
"""expand_otu_map_seq_ids: correctly maps seq_ids from three otu maps
"""
exp123 = {'a': ['seq1', 'seq2', 'seq5', 'seq6',
'seq7', 'seq8', 'seq3', 'seq4']}
actual123 = expand_otu_map_seq_ids(self.otu_map3,
expand_otu_map_seq_ids(self.otu_map2, self.otu_map1))
self.assertEqual(actual123, exp123)
def test_map_otu_map_files_two(self):
"""map_otu_map_files: correctly maps two otu files
"""
exp12 = {'110': ['seq1', 'seq2', 'seq5', 'seq6', 'seq7', 'seq8'],
'221': ['seq3', 'seq4']}
actual12 = map_otu_map_files([self.otu_map1_file, self.otu_map2_file])
self.assertEqual(exp12, actual12)
def test_map_otu_map_files_three(self):
"""map_otu_map_files: correctly maps three otu files
"""
exp123 = {'a': ['seq1', 'seq2', 'seq5', 'seq6',
'seq7', 'seq8', 'seq3', 'seq4']}
actual123 = map_otu_map_files(
[self.otu_map1_file, self.otu_map2_file, self.otu_map3_file])
self.assertEqual(exp123, actual123)
# third 'file' contains mixed tabs and spaces
actual123 = map_otu_map_files(
[self.otu_map1_file, self.otu_map2_file, ['a\t110 221']])
self.assertEqual(exp123, actual123)
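# The OTU map fixtures above (and the expected result files below) follow the
# QIIME OTU map text format: one OTU per line, with the OTU id followed by
# its member sequence ids, separated by tabs. The final assertion above also
# exercises a map line containing mixed tabs and spaces.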
dna_seqs_1 = """>cdhit_test_seqs_0 comment fields, not part of sequence identifiers
AACCCCCACGGTGGATGCCACACGCCCCATACAAAGGGTAGGATGCTTAAGACACATCGCGTCAGGTTTGTGTCAGGCCT
> cdhit_test_seqs_1
ACCCACACGGTGGATGCAACAGATCCCATACACCGAGTTGGATGCTTAAGACGCATCGCGTGAGTTTTGCGTCAAGGCT
>cdhit_test_seqs_2
CCCCCACGGTGGCAGCAACACGTCACATACAACGGGTTGGATTCTAAAGACAAACCGCGTCAAAGTTGTGTCAGAACT
>cdhit_test_seqs_3
CCCCACGGTAGCTGCAACACGTCCCATACCACGGGTAGGATGCTAAAGACACATCGGGTCTGTTTTGTGTCAGGGCT
>cdhit_test_seqs_4
GCCACGGTGGGTACAACACGTCCACTACATCGGCTTGGAAGGTAAAGACACGTCGCGTCAGTATTGCGTCAGGGCT
>cdhit_test_seqs_5
CCGCGGTAGGTGCAACACGTCCCATACAACGGGTTGGAAGGTTAAGACACAACGCGTTAATTTTGTGTCAGGGCA
>cdhit_test_seqs_6
CGCGGTGGCTGCAAGACGTCCCATACAACGGGTTGGATGCTTAAGACACATCGCAACAGTTTTGAGTCAGGGCT
>cdhit_test_seqs_7
ACGGTGGCTACAAGACGTCCCATCCAACGGGTTGGATACTTAAGGCACATCACGTCAGTTTTGTGTCAGAGCT
>cdhit_test_seqs_8
CGGTGGCTGCAACACGTGGCATACAACGGGTTGGATGCTTAAGACACATCGCCTCAGTTTTGTGTCAGGGCT
>cdhit_test_seqs_9
GGTGGCTGAAACACATCCCATACAACGGGTTGGATGCTTAAGACACATCGCATCAGTTTTATGTCAGGGGA"""
dna_seqs_result_file_90_exp = """0\tcdhit_test_seqs_0
1\tcdhit_test_seqs_1
2\tcdhit_test_seqs_2
3\tcdhit_test_seqs_3
4\tcdhit_test_seqs_4
5\tcdhit_test_seqs_5
6\tcdhit_test_seqs_6\tcdhit_test_seqs_8
7\tcdhit_test_seqs_7
8\tcdhit_test_seqs_9
"""
dna_seqs_2 = """>cdhit_test_seqs_0 comment fields, not part of sequence identifiers
ACACCCCGGGGGTTTACATTTTTTTTTTTTTTTTTTTTTTTT
>cdhit_test_seqs_1
ACACCCCGGGGGTTTACACCAACATACACCGAGTTGGA
>cdhit_test_seqs_2
ACACCCCGGGGGTTTACGGGGGGGGGGGGGGGGGGGGGGGGGG"""
# results are in length order
dna_seqs_2_result = {0: ['cdhit_test_seqs_2'],
1: ['cdhit_test_seqs_0'],
2: ['cdhit_test_seqs_1']}
dna_seqs_2_result_prefilter =\
{0: ['cdhit_test_seqs_0', 'cdhit_test_seqs_1', 'cdhit_test_seqs_2']}
dna_seqs_3 = """>uclust_test_seqs_0 some comment0
AACCCCCACGGTGGATGCCACACGCCCCATACAAAGGGTAGGATGCTTAAGACACATCGCGTCAGGTTTGTGTCAGGCCT
>uclust_test_seqs_1 some comment1
ACCCACACGGTGGATGCAACAGATCCCATACACCGAGTTGGATGCTTAAGACGCATCGCGTGAGTTTTGCGTCAAGGCT
>uclust_test_seqs_2 some comment2
CCCCCACGGTGGCAGCAACACGTCACATACAACGGGTTGGATTCTAAAGACAAACCGCGTCAAAGTTGTGTCAGAACT
>uclust_test_seqs_3 some comment3
CCCCACGGTAGCTGCAACACGTCCCATACCACGGGTAGGATGCTAAAGACACATCGGGTCTGTTTTGTGTCAGGGCT
>uclust_test_seqs_4 some comment4
GCCACGGTGGGTACAACACGTCCACTACATCGGCTTGGAAGGTAAAGACACGTCGCGTCAGTATTGCGTCAGGGCT
>uclust_test_seqs_5 some comment4_again
CCGCGGTAGGTGCAACACGTCCCATACAACGGGTTGGAAGGTTAAGACACAACGCGTTAATTTTGTGTCAGGGCA
>uclust_test_seqs_6 some comment6
CGCGGTGGCTGCAAGACGTCCCATACAACGGGTTGGATGCTTAAGACACATCGCAACAGTTTTGAGTCAGGGCT
>uclust_test_seqs_7 some comment7
ACGGTGGCTACAAGACGTCCCATCCAACGGGTTGGATACTTAAGGCACATCACGTCAGTTTTGTGTCAGAGCT
>uclust_test_seqs_8 some comment8
CGGTGGCTGCAACACGTGGCATACAACGGGTTGGATGCTTAAGACACATCGCCTCAGTTTTGTGTCAGGGCT
>uclust_test_seqs_9 some comment9
GGTGGCTGAAACACATCCCATACAACGGGTTGGATGCTTAAGACACATCGCATCAGTTTTATGTCAGGGGA"""
dna_seqs_3_derep = """>uclust_test_seqs_0 some comment0
AACCCCCACGGTGGATGCCACACGCCCCATACAAAGGGTAGGATGCTTAAGACACATCGCGTCAGGTTTGTGTCAGGCCT
>uclust_test_seqs_1 some comment1
ACCCACACGGTGGATGCAACAGATCCCATACACCGAGTTGGATGCTTAAGACGCATCGCGTGAGTTTTGCGTCAAGGCT
>uclust_test_seqs_1rep some comment1rep
ACCCACACGGTGGATGCAACAGATCCCATACACCGAGTTGGATGCTTAAGACGCATCGCGTGAGTTTTGCGTCAAGGCT
>uclust_test_seqs_1rep2 some comment1rep2
ACCCACACGGTGGATGCAACAGATCCCATACACCGAGTTGGATGCTTAAGACGCATCGCGTGAGTTTTGCGTCAAGGCT
>uclust_test_seqs_2 some comment2
CCCCCACGGTGGCAGCAACACGTCACATACAACGGGTTGGATTCTAAAGACAAACCGCGTCAAAGTTGTGTCAGAACT
>uclust_test_seqs_3 some comment3
CCCCACGGTAGCTGCAACACGTCCCATACCACGGGTAGGATGCTAAAGACACATCGGGTCTGTTTTGTGTCAGGGCT
>uclust_test_seqs_4 some comment4
GCCACGGTGGGTACAACACGTCCACTACATCGGCTTGGAAGGTAAAGACACGTCGCGTCAGTATTGCGTCAGGGCT
>uclust_test_seqs_5 some comment4_again
CCGCGGTAGGTGCAACACGTCCCATACAACGGGTTGGAAGGTTAAGACACAACGCGTTAATTTTGTGTCAGGGCA
>uclust_test_seqs_6 some comment6
CGCGGTGGCTGCAAGACGTCCCATACAACGGGTTGGATGCTTAAGACACATCGCAACAGTTTTGAGTCAGGGCT
>uclust_test_seqs_7 some comment7
ACGGTGGCTACAAGACGTCCCATCCAACGGGTTGGATACTTAAGGCACATCACGTCAGTTTTGTGTCAGAGCT
>uclust_test_seqs_8 some comment8
CGGTGGCTGCAACACGTGGCATACAACGGGTTGGATGCTTAAGACACATCGCCTCAGTTTTGTGTCAGGGCT
>uclust_test_seqs_9 some comment9
GGTGGCTGAAACACATCCCATACAACGGGTTGGATGCTTAAGACACATCGCATCAGTTTTATGTCAGGGGA
>uclust_test_seqs_9rep some comment9rep
GGTGGCTGAAACACATCCCATACAACGGGTTGGATGCTTAAGACACATCGCATCAGTTTTATGTCAGGGGA
"""
uclustref_query_seqs1 = """>uclust_test_seqs_0 some comment aaa
ACGGTGGCTACAAGACGTCCCATCCAACGGGTTGGATACTTAAGGCACATCACGTCAGTTTTGTGTCAGAGCT
>uclust_test_seqs_1 some comment bbb
GCCACGGTGGGTACAACACGTCCACTACATCGGCTTGGAAGGTAAAGACACGTCGCGTCAGTATTGCGTCAGGGCT
>uclust_test_seqs_2 some comment vv
CCCCCACGGTGGCAGCAACACGTCACATACAACGGGTTGGATTCTAAAGACAAACCGCGTCAAAGTTGTGTCAGAACT
>uclust_test_seqs_3 some comment
CCCCACGGTAGCTGCAACACGTCCCATACCACGGGTAGGATGCTAAAGACACATCGGGTCTGTTTTGTGTCAGGGCT
>uclust_test_seqs_4 some comment
ACCCACACGGTGGATGCAACAGATCCCATACACCGAGTTGGATGCTTAAGACGCATCGCGTGAGTTTTGCGTCAAGGCT
>uclust_test_seqs_5 some comment
CCGCGGTAGGTGCAACACGTCCCATACAACGGGTTGGAAGGTTAAGACACAACGCGTTAATTTTGTGTCAGGGCA
>uclust_test_seqs_6 some comment6
CGCGGTGGCTGCAAGACGTCCCATACAACGGGTTGGATGCTTAAGACACATCGCAACAGTTTTGAGTCAGGGCT
>uclust_test_seqs_7 some comment
AACCCCCACGGTGGATGCCACACGCCCCATACAAAGGGTAGGATGCTTAAGACACATCGCGTCAGGTTTGTGTCAGGCCT
>uclust_test_seqs_8 some comment8
CGGTGGCTGCAACACGTGGCATACAACGGGTTGGATGCTTAAGACACATCGCCTCAGTTTTGTGTCAGGGCT
>uclust_test_seqs_9 some comment
GGTGGCTGAAACACATCCCATACAACGGGTTGGATGCTTAAGACACATCGCATCAGTTTTATGTCAGGGGA
"""
uclustref_ref_seqs1 = """>ref1 25 random bases appended to uclust_test_seqs_0 and one mismatch
ACGGTGGCTACAAGACGTCCCATCCAACGGGTTGGATATTTAAGGCACATCACGTCAGTTTTGTGTCAGAGCTATAGCAGCCCCAGCGTTTACTTCTA
>ref2 15 random bases prepended to uclust_test_seqs_1 and one mismatch
GCTGCGGCGTCCTGCGCCACGGTGGGTACAACACGTCCACTACATCTGCTTGGAAGGTAAAGACACGTCGCGTCAGTATTGCGTCAGGGCT
>ref3 5 random bases prepended and 10 random bases appended to uclust_test_seqs_2
ATAGGCCCCCACGGTGGCAGCAACACGTCACATACAACGGGTTGGATTCTAAAGACAAACCGCGTCAAAGTTGTGTCAGAACTGCCTGATTCA
>ref4 exact match to uclust_test_seqs_3
CCCCACGGTAGCTGCAACACGTCCCATACCACGGGTAGGATGCTAAAGACACATCGGGTCTGTTTTGTGTCAGGGCT
"""
dna_seqs_3_result_file_90_exp = """0\tuclust_test_seqs_0
1\tuclust_test_seqs_1
2\tuclust_test_seqs_2
3\tuclust_test_seqs_3
4\tuclust_test_seqs_4
5\tuclust_test_seqs_5
6\tuclust_test_seqs_6\tuclust_test_seqs_8
7\tuclust_test_seqs_7
8\tuclust_test_seqs_9
"""
dna_seqs_4 = """>uclust_test_seqs_0 comment fields, not part of sequence identifiers
ACACCCCGGGGGTTTACATTTTTTTTTTTTTTTTTTTTTTTT
>uclust_test_seqs_1 blah blah blah
ACACCCCGGGGGTTTACACCAACATACACCGAGTTGGA
>uclust_test_seqs_2 blah blah
ACACCCCGGGGGTTTACGGGGGGGGGGGGGGGGGGGGGGGGGG"""
# results are in length order
dna_seqs_4_result = {0: ['uclust_test_seqs_2'],
1: ['uclust_test_seqs_0'],
2: ['uclust_test_seqs_1']}
dna_seqs_5 = """>uclust_test_seqs_0 some comment
ACGGTGGCTACAAGACGTCCCATCCAACGGGTTGGATACTTAAGGCACATCACGTCAGTTTTGTGTCAGAGCT
>uclust_test_seqs_0_rc some other comment
AGCTCTGACACAAAACTGACGTGATGTGCCTTAAGTATCCAACCCGTTGGATGGGACGTCTTGTAGCCACCGT
"""
dna_seqs_6 = """>uclust_test_seqs_0 some comment0
AACCCCCACGGTGGATGCCACACGCCCCATACAAAGGGTAGGATGCTTAAGACACATCGCGTCAGGTTTGTGTCAGGCCT
>uclust_test_seqs_1 some comment1
AACCCCCACGGTGGATGCCACACGCCCCATACAAAGGGTAGGATGCTTAAGACACATCGCGTCAGGTTTGTGTCAGGCCT
>uclust_test_seqs_2 some comment2
CCCCCACGGTGGCAGCAACACGTCACATACAACGGGTTGGATTCTAAAGACAAACCGCGTCAAAGTTGTGTCAGAACT
>uclust_test_seqs_3 some comment3
CCCCACGGTAGCTGCAACACGTCCCATACCACGGGTAGGATGCTAAAGACACATCGGGTCTGTTTTGTGTCAGGGCT
>uclust_test_seqs_4 some comment4
GCCACGGTGGGTACAACACGTCCACTACATCGGCTTGGAAGGTAAAGACACGTCGCGTCAGTATTGCGTCAGGGCT
>uclust_test_seqs_5 some comment4_again
AACCCCCACGGTGGATGCCACACGCCCCATACAAAGGGTAGGATGCTTAAGACACATCGCGTCAGGTTTGTGTCAGGCCT
>uclust_test_seqs_6 some comment6
CGCGGTGGCTGCAAGACGTCCCATACAACGGGTTGGATGCTTAAGACACATCGCAACAGTTTTGAGTCAGGGCT
>uclust_test_seqs_7 some comment7
AACCCCCACGGTGGATGCCACACGCCCCATACCAAGGGTAGGATGCTTAAGACACATCGCGTCAGGTTTGTGTCAGGCCT
>uclust_test_seqs_8 some comment8
CGGTGGCTGCAACACGTGGCATACAACGGGTTGGATGCTTAAGACACATCGCCTCAGTTTTGTGTCAGGGCT
>uclust_test_seqs_9 some comment9
GGTGGCTGAAACACATCCCATACAACGGGTTGGATGCTTAAGACACATCGCATCAGTTTTATGTCAGGGGA"""
dna_seqs_4_result_prefilter =\
{0: ['uclust_test_seqs_0', 'uclust_test_seqs_1', 'uclust_test_seqs_2']}
expected_uc_output =\
['# Tab-separated fields:',
'# 1=Type, 2=ClusterNr, 3=SeqLength or ClusterSize, 4=PctId, 5=Strand, 6=QueryStart, 7=SeedStart, 8=Alignment, 9=QueryLabel, 10=TargetLabel',
'# Record types (field 1): L=LibSeed, S=NewSeed, H=Hit, R=Reject, D=LibCluster, C=NewCluster, N=NoHit',
'# For C and D types, PctId is average id with seed.',
'# QueryStart and SeedStart are zero-based relative to start of sequence.',
'# If minus strand, SeedStart is relative to reverse-complemented seed.',
'S\t0\t71\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_9\t*',
'S\t1\t76\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_4\t*',
'S\t2\t72\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_8\t*',
'S\t3\t74\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_6\t*',
'S\t4\t75\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_5\t*',
'S\t5\t78\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_2\t*',
'S\t6\t77\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_3\t*',
'S\t7\t73\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_7\t*',
'S\t8\t79\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_1\t*',
'S\t9\t80\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_0\t*',
'C\t0\t1\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_9\t*',
'C\t1\t1\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_4\t*',
'C\t2\t1\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_8\t*',
'C\t3\t1\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_6\t*',
'C\t4\t1\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_5\t*',
'C\t5\t1\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_2\t*',
'C\t6\t1\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_3\t*',
'C\t7\t1\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_7\t*',
'C\t8\t1\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_1\t*',
'C\t9\t1\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_0\t*']
expected_ref_uc_file =\
['# Tab-separated fields:',
'# 1=Type, 2=ClusterNr, 3=SeqLength or ClusterSize, 4=PctId, 5=Strand, 6=QueryStart, 7=SeedStart, 8=Alignment, 9=QueryLabel, 10=TargetLabel',
'# Record types (field 1): L=LibSeed, S=NewSeed, H=Hit, R=Reject, D=LibCluster, C=NewCluster, N=NoHit', '# For C and D types, PctId is average id with seed.',
'# QueryStart and SeedStart are zero-based relative to start of sequence.',
'# If minus strand, SeedStart is relative to reverse-complemented seed.',
'S\t4\t71\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_9\t*',
'L\t1\t91\t*\t*\t*\t*\t*\tref2 15 random bases prepended to uclust_test_seqs_1 and one mismatch\t*',
'H\t1\t76\t98.7\t+\t0\t0\t15I76M\tQiimeExactMatch.uclust_test_seqs_1\tref2 15 random bases prepended to uclust_test_seqs_1 and one mismatch',
'S\t5\t72\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_8\t*',
'S\t6\t74\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_6\t*',
'S\t7\t75\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_5\t*',
'L\t2\t93\t*\t*\t*\t*\t*\tref3 5 random bases prepended and 10 random bases appended to uclust_test_seqs_2\t*',
'H\t2\t78\t100.0\t+\t0\t0\t5I78M10I\tQiimeExactMatch.uclust_test_seqs_2\tref3 5 random bases prepended and 10 random bases appended to uclust_test_seqs_2',
'L\t3\t77\t*\t*\t*\t*\t*\tref4 exact match to uclust_test_seqs_3\t*',
'H\t3\t77\t100.0\t+\t0\t0\t77M\tQiimeExactMatch.uclust_test_seqs_3\tref4 exact match to uclust_test_seqs_3',
'L\t0\t98\t*\t*\t*\t*\t*\tref1 25 random bases appended to uclust_test_seqs_0 and one mismatch\t*',
'H\t0\t73\t98.6\t+\t0\t0\t73M25I\tQiimeExactMatch.uclust_test_seqs_0\tref1 25 random bases appended to uclust_test_seqs_0 and one mismatch',
'S\t8\t79\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_4\t*',
'S\t9\t80\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_7\t*',
'D\t0\t2\t*\t*\t*\t*\t98.6\tref1 25 random bases appended to uclust_test_seqs_0 and one mismatch\t*',
'D\t1\t2\t*\t*\t*\t*\t98.7\tref2 15 random bases prepended to uclust_test_seqs_1 and one mismatch\t*',
'D\t2\t2\t*\t*\t*\t*\t100.0\tref3 5 random bases prepended and 10 random bases appended to uclust_test_seqs_2\t*',
'D\t3\t2\t*\t*\t*\t*\t100.0\tref4 exact match to uclust_test_seqs_3\t*',
'C\t4\t1\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_9\t*',
'C\t5\t1\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_8\t*',
'C\t6\t1\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_6\t*',
'C\t7\t1\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_5\t*',
'C\t8\t1\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_4\t*',
'C\t9\t1\t*\t*\t*\t*\t*\tQiimeExactMatch.uclust_test_seqs_7\t*']
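# In the reference-based .uc output above, 'L' and 'H' lines record reference
# (library) seeds and hits to them, 'S' and 'C' lines record seeds and
# clusters for sequences that did not match a reference, and 'D' lines are
# library clusters (per the record-type legend in the file header).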
usearch_ref_seqs1 = """>ref1 ecoli sequence
CGCGTGTATGAAGAAGGCCTTCGGGTTGTAAAGTACTTTCAGCGGGGAGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAGCACCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCA
>EU199232 1 1236 Bacteria/Deltaproteobacteria/Desulfurella - Hippea/uncultured
TACGCGCGGAAATCGAGCGAGATTGGGAACGCAAGTTCCTGAGTATTGCGGCGAACGGGTGAGTAAGACGTGGGTGATCTACCCCTAGGGTGGGAATAACCCGGGGAAACCCGGGCTAATACCGAATAAGACCACAGGAGGCGACTCCAGAGGGTCAAAGGGAGCCTTGGCCTCCCCC
>L07864 1 1200 Bacteria/Beta Gammaproteobacteria/Solemya symbiont
GGCTCAGATTGAACGCTGGCGGCATGCCTAACACATGCAAGTCGAACGGTAACAGGCGGAGCTTGCTCTGCGCTGACGAGTGGCGGACGGGTGAGTAATGCATGGGAATCTGCCATATAGTGGGGGACAACTGGGGAAACCCAGGCTAATACCGCATAATCTCTACGGAGGAAAGGCTTC
"""
dna_seqs_usearch = """>usearch_ecoli_seq
CGCGTGTATGAAGAAGGCCTTCGGGTTGTAAAGTACTTTCAGCGGGGAGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAGCACCGGCTAACTCCGT
>Solemya seq
GGCTCAGATTGAACGCTGGCGGCATGCCTAACACATGCAAGTCGAACGGTAACAGGCGGAGCTTGCTCTGCGCTGACGAGTGGCGGACGGGTGAGTA
>usearch_ecoli_seq2
CGCGTGTATGAAGAAGGCCTTCGGGTTGTAAAGTACTTTCAGCGGGGAGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAGCACCGGCTAACTCCGTCCAT
>Solemya_seq2
GGCTCAGATTGAACGCTGGCGGCATGCCTAACACATGCAAGTCGAACGGTAACAGGCGGAGCTTGCTCTGCGCTGACGAGTGGCGGACGGGTGAGTATCAAG
>chimera
CGCGTGTATGAAGAAGGCCTTCGGGTTGTAAAGTACTTTCAGCGGGGAGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACCCCTAGGGTGGGAATAACCCGGGGAAACCCGGGCTAATACCGAATAAGACCACAGGAGGCGACTCCAGAGGGTCAAAGGGAGCCTTGGCCTCCCCC
"""
dna_seqs_usearch_97perc_id = """>usearch_ecoli_seq
CGCGTGTATGAAGAAGGCCTTCGGGTTGTAAAGTACTTTCAGCGGGGAGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAG
>usearch_ecoli_seq_1bp_change
CGCGTGTATGAAGAAGGCCTACGGGTTGTAAAGTACTTTCAGCGGGGAGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAG
>usearch_ecoli_seq_2bp_change
CGCGTGTATGAAGAAGGCCTACGGGTTGTAAAGTACTTTCAGCGGGGCGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAG
"""
dna_seqs_usearch_97perc_id_len_diff = """>usearch_ecoli_seq
CGCGTGTATGAAGAAGGCCTTCGGGTTGTAAAGTACTTTCAGCGGGGAGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAG
>usearch_ecoli_seq_1bp_change
CGCGTGTATGAAGAAGGCCTACGGGTTGTAAAGTACTTTCAGCGGGGAGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAGA
>usearch_ecoli_seq_2bp_change
CGCGTGTATGAAGAAGGCCTACGGGTTGTAAAGTACTTTCAGCGGGGCGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAGAAA
"""
dna_seqs_usearch_97perc_dups = """>usearch_ecoli_seq
CGCGTGTATGAAGAAGGCCTTCGGGTTGTAAAGTACTTTCAGCGGGGAGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAG
>usearch_ecoli_seq_1bp_change
CGCGTGTATGAAGAAGGCCTACGGGTTGTAAAGTACTTTCAGCGGGGAGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAGA
>usearch_ecoli_seq_2bp_change
CGCGTGTATGAAGAAGGCCTACGGGTTGTAAAGTACTTTCAGCGGGGCGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAGAAA
>usearch_ecoli_seq_1bp_change_dup1
CGCGTGTATGAAGAAGGCCTACGGGTTGTAAAGTACTTTCAGCGGGGAGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAGA
>usearch_ecoli_seq_1bp_change_dup2
CGCGTGTATGAAGAAGGCCTACGGGTTGTAAAGTACTTTCAGCGGGGAGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAGA
"""
dna_seqs_usearch_97perc_id_rc = """>usearch_ecoli_seq
CGCGTGTATGAAGAAGGCCTTCGGGTTGTAAAGTACTTTCAGCGGGGAGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAG
>usearch_ecoli_seq_1bp_change
CGCGTGTATGAAGAAGGCCTACGGGTTGTAAAGTACTTTCAGCGGGGAGGAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAGAAGAAG
>usearch_ecoli_seq_2bp_change_rc
CTTCTTCTGCGGGTAACGTCAATGAGCAAAGGTATTAACTTTACTCCCTCCGCCCCGCTGAAAGTACTTTACAACCCGTAGGCCTTCTTCATACACGCG
"""
dna_seqs_rc_single_seq = """>usearch_ecoli_seq_2bp_change_rc
CTTCTTCTGCGGGTAACGTCAATGAGCAAAGGTATTAACTTTACTCCCTCCGCCCCGCTGAAAGTACTTTACAACCCGTAGGCCTTCTTCATACACGCG
"""
# Reads to cluster
# there are 30 reads representing 3 species (gives 3 clusters)
sumaclust_reads_seqs = """>s1_630 reference=1049393 amplicon=complement(497..788)
GTGCCAGCAGCCGCGGTAATACAGAGGTCTCAAGCGTTGTTCGGATTCATTGGGCGTAAAGGGTGCGTAGGTGGCGGGGTAAGTCAGGTGTGAAATCTCG
>s1_2369 reference=1049393 amplicon=complement(497..788) errors=73%A
GTGCCAGCAGCCGCGGTAATACAGAGGTCTCAAGCGTTGTTCGGATTCATTGGGCGTAAAGGGTGCGTAGGTAGCGGGGTAAGTCAGGTGTGAAATCTCG
>s1_3750 reference=1049393 amplicon=complement(497..788) errors=100%A
GTGCCAGCAGCCGCGGTAATACAGAGGTCTCAAGCGTTGTTCGGATTCATTGGGCGTAAAGGGTGCGTAGGTGGCGGGGTAAGTCAGGTGTGAAATCTCA
>s1_4572 reference=1049393 amplicon=complement(497..788)
GTGCCAGCAGCCGCGGTAATACAGAGGTCTCAAGCGTTGTTCGGATTCATTGGGCGTAAAGGGTGCGTAGGTGGCGGGGTAAGTCAGGTGTGAAATCTCG
>s1_5748 reference=1049393 amplicon=complement(497..788)
GTGCCAGCAGCCGCGGTAATACAGAGGTCTCAAGCGTTGTTCGGATTCATTGGGCGTAAAGGGTGCGTAGGTGGCGGGGTAAGTCAGGTGTGAAATCTCG
>s1_6846 reference=1049393 amplicon=complement(497..788) errors=67%A
GTGCCAGCAGCCGCGGTAATACAGAGGTCTCAAGCGTTGTTCGGATTCATTGGGCGTAAAGGGTGCATAGGTGGCGGGGTAAGTCAGGTGTGAAATCTCG
>s1_7634 reference=1049393 amplicon=complement(497..788) errors=99%T
GTGCCAGCAGCCGCGGTAATACAGAGGTCTCAAGCGTTGTTCGGATTCATTGGGCGTAAAGGGTGCGTAGGTGGCGGGGTAAGTCAGGTGTGAAATCTTG
>s1_8623 reference=1049393 amplicon=complement(497..788) errors=17-
GTGCCAGCAGCCGCGGAATACAGAGGTCTCAAGCGTTGTTCGGATTCATTGGGCGTAAAGGGTGCGTAGGTGGCGGGGTAAGTCAGGTGTGAAATCTCG
>s1_8744 reference=1049393 amplicon=complement(497..788) errors=62%A
GTGCCAGCAGCCGCGGTAATACAGAGGTCTCAAGCGTTGTTCGGATTCATTGGGCGTAAAGAGTGCGTAGGTGGCGGGGTAAGTCAGGTGTGAAATCTCG
>s1_13961 reference=1049393 amplicon=complement(497..788)
GTGCCAGCAGCCGCGGTAATACAGAGGTCTCAAGCGTTGTTCGGATTCATTGGGCGTAAAGGGTGCGTAGGTGGCGGGGTAAGTCAGGTGTGAAATCTCG
>s1_4677 reference=4382408 amplicon=complement(487..778) errors=74%T
GTGCCAGCAGCCGCGGTAATACGGAGGGTCCAAGCGTTGTCCGGAATCACTGGGTGTAAAGGGTGCGTAGGCGTGTCTGTAAGTCAGAGGTGAAAGCCCA
>s1_8592 reference=4382408 amplicon=complement(487..778) errors=95+A
GTGCCAGCAGCCGCGGTAATACGGAGGGTCCAAGCGTTGTCCGGAATCACTGGGTGTAAAGGGTGCGTAGGCGGGTCTGTAAGTCAGAGGTGAAAAGCCCA
>s1_8977 reference=4382408 amplicon=complement(487..778)
GTGCCAGCAGCCGCGGTAATACGGAGGGTCCAAGCGTTGTCCGGAATCACTGGGTGTAAAGGGTGCGTAGGCGGGTCTGTAAGTCAGAGGTGAAAGCCCA
>s1_10439 reference=4382408 amplicon=complement(487..778)
GTGCCAGCAGCCGCGGTAATACGGAGGGTCCAAGCGTTGTCCGGAATCACTGGGTGTAAAGGGTGCGTAGGCGGGTCTGTAAGTCAGAGGTGAAAGCCCA
>s1_11001 reference=4382408 amplicon=complement(487..778) errors=91%G
GTGCCAGCAGCCGCGGTAATACGGAGGGTCCAAGCGTTGTCCGGAATCACTGGGTGTAAAGGGTGCGTAGGCGGGTCTGTAAGTCAGAGGGGAAAGCCCA
>s1_11650 reference=4382408 amplicon=complement(487..778) errors=78-
GTGCCAGCAGCCGCGGTAATACGGAGGGTCCAAGCGTTGTCCGGAATCACTGGGTGTAAAGGGTGCGTAGGCGGGTCGTAAGTCAGAGGTGAAAGCCCA
>s1_12366 reference=4382408 amplicon=complement(487..778)
GTGCCAGCAGCCGCGGTAATACGGAGGGTCCAAGCGTTGTCCGGAATCACTGGGTGTAAAGGGTGCGTAGGCGGGTCTGTAAGTCAGAGGTGAAAGCCCA
>s1_14735 reference=4382408 amplicon=complement(487..778) errors=94%C
GTGCCAGCAGCCGCGGTAATACGGAGGGTCCAAGCGTTGTCCGGAATCACTGGGTGTAAAGGGTGCGTAGGCGGGTCTGTAAGTCAGAGGTGACAGCCCA
>s1_15985 reference=4382408 amplicon=complement(487..778)
GTGCCAGCAGCCGCGGTAATACGGAGGGTCCAAGCGTTGTCCGGAATCACTGGGTGTAAAGGGTGCGTAGGCGGGTCTGTAAGTCAGAGGTGAAAGCCCA
>s1_21935 reference=4382408 amplicon=complement(487..778)
GTGCCAGCAGCCGCGGTAATACGGAGGGTCCAAGCGTTGTCCGGAATCACTGGGTGTAAAGGGTGCGTAGGCGGGTCTGTAAGTCAGAGGTGAAAGCCCA
>s1_844 reference=129416 amplicon=complement(522..813)
GTGCCAGCAGCCGCGGTAATACGGAGGGTGCAAGCGTTATTCGGAATTACTGGGCGTAAAGGGCGTGTAGGCGGCTTTGTAAGTCAGATGTGAAAGCCCA
>s1_1271 reference=129416 amplicon=complement(522..813) errors=94%C
GTGCCAGCAGCCGCGGTAATACGGAGGGTGCAAGCGTTATTCGGAATTACTGGGCGTAAAGGGCGTGTAGGCGGCTTTGTAAGTCAGATGTGACAGCCCA
>s1_1886 reference=129416 amplicon=complement(522..813)
GTGCCAGCAGCCGCGGTAATACGGAGGGTGCAAGCGTTATTCGGAATTACTGGGCGTAAAGGGCGTGTAGGCGGCTTTGTAAGTCAGATGTGAAAGCCCA
>s1_5347 reference=129416 amplicon=complement(522..813)
GTGCCAGCAGCCGCGGTAATACGGAGGGTGCAAGCGTTATTCGGAATTACTGGGCGTAAAGGGCGTGTAGGCGGCTTTGTAAGTCAGATGTGAAAGCCCA
>s1_5737 reference=129416 amplicon=complement(522..813)
GTGCCAGCAGCCGCGGTAATACGGAGGGTGCAAGCGTTATTCGGAATTACTGGGCGTAAAGGGCGTGTAGGCGGCTTTGTAAGTCAGATGTGAAAGCCCA
>s1_6200 reference=129416 amplicon=complement(522..813) errors=92%C
GTGCCAGCAGCCGCGGTAATACGGAGGGTGCAAGCGTTATTCGGAATTACTGGGCGTAAAGGGCGTGTAGGCGGCTTTGTAAGTCAGATGTCAAAGCCCA
>s1_7014 reference=129416 amplicon=complement(522..813)
GTGCCAGCAGCCGCGGTAATACGGAGGGTGCAAGCGTTATTCGGAATTACTGGGCGTAAAGGGCGTGTAGGCGGCTTTGTAAGTCAGATGTGAAAGCCCA
>s1_7040 reference=129416 amplicon=complement(522..813) errors=40%G
GTGCCAGCAGCCGCGGTAATACGGAGGGTGCAAGCGTTAGTCGGAATTACTGGGCGTAAAGGGCGTGTAGGCGGCTTTGTAAGTCAGATGTGAAAGCCCA
>s1_7881 reference=129416 amplicon=complement(522..813)
GTGCCAGCAGCCGCGGTAATACGGAGGGTGCAAGCGTTATTCGGAATTACTGGGCGTAAAGGGCGTGTAGGCGGCTTTGTAAGTCAGATGTGAAAGCCCA
>s1_8615 reference=129416 amplicon=complement(522..813) errors=81%G
GTGCCAGCAGCCGCGGTAATACGGAGGGTGCAAGCGTTATTCGGAATTACTGGGCGTAAAGGGCGTGTAGGCGGCTTTGTGAGTCAGATGTGAAAGCCCA
"""
# Reference sequence database
sortmerna_reference_seqs_fp = """>426848
AGAGTTTGATCCTGGCTCAGGATGAACGCTAGCGGCAGGCTTAATACATGCAAGTCGAGGGGCAGCACTGGTAGCAATAC
CTGGTGGCGACCGGCGGACGGGTGCGTAACACGTATGCAACCTACCCTGTACAGGGGGATAGCCCGAGGAAATTCGGATT
AATACCCCATACGATAAGAATCGGCATCGATTTTTATTGAAAGCTCCGGCGGTACAGGATGGGCATGCGCCCCATTAGCT
AGTTGGTGAGGTAACGGCTCACCAAGGCTACGATGGGTAGGGGGCCTGAGAGGGTGATCCCCCACACTGGAACTGAGACA
CGGTCCAGACTCCTACGGGAGGCAGCAGTAAGGAATATTGGTCAATGGGCGCAAGCCTGAACCAGCCATGCCGCGTGCAG
GAAGACTGCCATTATGGTTGTAAACTGCTTTTATATGGGAAGAAACCTCCGGACGTGTCCGGAGCTGACGGTACCATGTG
AATAAGGATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATCCAAGCGTTATCCGGATTTATTGGGTTTAAA
GGGTGCGTAGGCGGCGTGTTAAGTCAGAGGTGAAATTCGGCAGCTCAACTGTCAAATTGCCTTTGATACTGGCACACTTG
AATGCGATTGAGGTAGGCGGAATGTGACATGTAGCGGTGAAATGCTTAGACATGTGACAGAACACCGATTGCGAAGGCAG
CTTACCAAGTCGTTATTGACGCTGAGGCACGAAAGCGTGGGGAGCAAACAGGATTAGATACCCTGGTAGTCCACGCCGTA
AACGATGATAACTCGACGTTAGCGATACACTGTTAGCGTCCAAGCGAAAGCGTTAAGTTATCCACCTGGGAAGTACGATC
GCAAGGTTGAAACTCAAAGGAATTGACGGGGGCCCGCACAAGCGGTGGAGCATGTGGTTTAATTCGATGATACGCGAGGA
ACCTTACCAGGGCTTAAATGGGGAACGACCTTCTGGGAAACCAGAATTTCTTTTAGACGGTCCTCAAGGTGCTGCATGGT
TGTCGTCAGCTCGTGCCGTGAGGTGTTGGGTTAAGTCCCGCAACGAGCGCAACCCCTACTGTTAGTTGCCAGCGGATAAT
GCCGGGGACTCTAGCGGAACTGCCTGTGCAAACAGAGAGGAAGGTGGGGATGACGTCAAATCATCACGGCCCTTACGTCC
TGGGCTACACACGTGCTACAATGGCCGGTACAGAGGGCAGCCACTTCGTGAGAAGGAGCGAATCCTTAAAGCCGGTCTCA
GTTCGGATTGTAGTCTGCAACTCGACTACATGAAGCTGGAATCGCTAGTAATCGCGTATCAGCCATGACGCGGTGAATAC
GTTCCCGGGCCTTGTACACACCGCCCGTCAAGCCATGGGAATTGGGAGTACCTAAAGTCGGTAACCGCAAGGAGCCGCCT
AAGGTAATACCAGTGACTGGGGCTAAGTCGTAACAAGGTAGCCGTA
>42684
AGAGTTTGATCCTGGCTCAGATTGAACGCTGGCGGCATGCTTTACACATGCAAGTCGGACGGCAGCACAGAGGAGCTTGC
TTCTTGGGTGGCGAGTGGCGAACGGGTGAGTGACGCATCGGAACGTACCGAGTAATGGGGGATAACTGTCCGAAAGGACA
GCTAATACCGCATACGCCCTGAGGGGGAAAGCGGGGGATCTTAGGACCTCGCGTTATTCGAGCGGCCGATGTCTGATTAG
CTGGTTGGCGGGGTAAAGGCCCACCAAGGCGACGATCAGTAGCGGGTCTGAGAGGATGATCCGCCACACTGGGACTGAGA
CACGGCCCAGACTCCTACGGGAGGCAGCAGTGGGGAATTTTGGACAATGGGCGCAAGCCTGATCCAGCCATGCCGCGTGT
CTGAAGAAGGCCTTCGGGTTGTAAAGGACTTTTGTCAGGGAAGAAAAGGAACGTGTTAATACCATGTTCTGATGACGGTA
CCTGAAGAATAAGCACCGGCTAACTACGTGCCAGCAGCCGCGGTAATACGTAGGGTGCGAGCGTTAATCGGAATTACTGG
GCGTAAAGCGGGCGCAGACGGTTACTTAAGCGGGATGTGAAATCCCCGGGCTCAACCCGGGAACTGCGTTCCGAACTGGG
TGGCTAGAGTGTGTCAGAGGGGGGTAGAATTCCACGTGTAGCAGTGAAATGCGTAGAGATGTGGAGGAATACCGATGGCG
AAGGCAGCCCCCTGGGATAACACTGACGTTCATGCCCGAAAGCGTGGGTAGCAAACAGGGTTAGATACCCTGGTAGTCCA
CGCCCTAAACGATGTCGATTAGCTGTTGGGGCACTTGATGCCTTAGTAGCGTAGCTAACGCGTGAAATCGACCGCCTGGG
GAGTACGGTCGCAAGATTAAAACTCAAAGGAATTGACGGGGACCCGCACAAGCGGTGGATGATGTGGATTAATTCGATGC
AACGCGAAGAACCTTACCTGGTCTTGACATGTACGGAATCTTCCAGAGACGGAAGGGTGCCTTCGGGAGCCGTAACACAG
GTGCTGCATGGCTGTCGTCAGCTCGTGTCGTGAGATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCTTGTCATTAGTTG
CCATCACTTGGTTGGGCACTCTAATGAGACTGCCGGTGACAAACCGGAGGAAGGTGGGGATGACGTCAAGTCCTCATGGC
CCTTATGACCAGGGCTTCACACGTCATACAATGGTCGGTACAGAGGGTAGCCAAGCCGCGAGGCGGAGCCAATCCCAGAA
AACCGATCGTAGTCCGGATTGCACTCTGCAACTCGAGTGCATGAAGTCGGAATCGCTAGTAATCGCAGGTCAGCATACTG
CGGTGAATACGTTCCCGGGTCTTGTACACACCGCCCGTCACACCATGGGAGTGGGGGATACCAGAAGCAGGTAGGCTAAC
CGCAAGGAGGCCGCTTGCCACGGTATGCTTCATGACTGGGGTGAAGTCGTAACAAGGTAAC
>342684
AGAGTTTGATCCTGGCTCAGGATGAACGCTAGCGGCAGGCTTAACACATGCAAGTCGAGGGGCATCGCGGGTAGCAATAC
CTGGCGGCGACCGGCGGAAGGGTGCGTAACGCGTGAGCGACATACCCGTGACAGGGGGATAACAGATGGAAACGTCTCCT
AATACCCCATAAGATCATATATCGCATGGTATGTGATTGAAAGGTGAGAACCGGTCACGGATTGGCTCGCGTCCCATCAG
GTAGACGGCGGGGCAGCGGCCCGCCGTGCCGACGACGGGTAGGGGCTCTGAGAGGAGTGACCCCCACAATGGAACTGAGA
CACGGTCCATACTCCTACGGGAGGCAGCAGTGAGGAATATTGGTCAATGGGCGGAAGCCTGAACCAGCCATGCCGCGTGC
GGGAGGACGGCCCTATGGGTTGTAAACCGCTTTTGAGTGAGAGCAATAAGGTTCACGTGTGGACCGATGAGAGTATCATT
CGAATAAGCATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATGCGAGCGTTATCCGGATTCATTGGGTTTA
AAGGGTGCGTAGGCGGACATGTAAGTCCGAGGTGAAAGACCGGGGCCCAACCCCGGGGTTGCCTCGGATACTGTGTGTCT
GGAGTGGACGTGCCGCCGGGGGAATGAGTGGTGTAGCGGTGAAATGCATAGATGTCACTCAGAACACCGATTGCGAAGGC
ACCTGGCGAATGTCTTACTGACGCTGAGGCACGAAAGCGTGGGGATCGAACAGGATTAGATACCCTGGTAGTCCACGCAG
TAAACGATGATGGCTGTCCGTTCGCTCCGATAGGAGTGAGTAGACAAGCGAAAGCGCTAAGCCATCCACCTGGGGAGTAC
GGCCGCAAGGCTGAAACTCAAAGGAATTGACGGGGGCCCGCACAAGCGGAGGAACATGTGGTTTAATTCGATGATACGCG
AGGAACCTTACCCGGGCTCGAACGGCAGGTGAACGATGCAGAGATGCAAAGGCCCTTCGGGGCGTCTGTCGAGGTGCTGC
ATGGTTGTCGTCAGCTCGTGCCGTGAGGTGTCGGCTCAAGTGCCATAACGAGCGCAACCCTTGCCTGCAGTTGCCATCGG
GTAAAGCCGGGGACTCTGCAGGGACTGCCACCGCAAGGTGAGAGGAGGGGGGGGATGACGTCAAATCAGCACGGCCCTTA
CGTCCGGGGCGACACACGTGTTACAATGGCGGCCACAGCGGGAAGCCACCCAGTGATGGGGCGCGGATCCCAAAAAAGCC
GCCTCAGTTCGGATCGGAGTCTGCAACCCGACTCCGTGAAGCTGGATTCGCTAGTAATCGCGCATCAGCCATGGCGCGGT
GAATACGTTCCCGGGCCTTGTACACACCGCCCGTCAAGCCATGGGAGTCGTGGGCGCCTGAAGGCCGTGACCGCGAGGAG
CGGCCTAGGGCGAACGCGGTGACTGGGGCTAAGTCGTAACAAGGTA
>295053
AGAGTTTGATCCTGGCTCAGGACGAACGCTGGCGGCGTGCCTAACACATGCAAGTCGAACGGAGATGCTCCTTCGGGAGT
ATCTTAGTGGCGAACGGGTGAGTAACGCGTGAGCAACCTGACCTTCACAGGGGGATAACCGCTGGAAACAGCAGCTAATA
CCGCATAACGTCGCAAGACCAAAGAGGGGGACCTTCGGGCCTCTTGCCATCGGATGTGCCCAGATGGGATTAGCTTGTTG
GTGGGGTAACGGCTCACCAAGGCGACGATCCCTAGCTGGTCTGAGAGGATGACCAGCCACACTGGAACTGAGACACGGTC
CAGACTCCTACGGGAGGCAGCAGTGGGGAATATTGCACAATGGGCGCAAGCCTGATGCAGCCATGCCGCGTGTATGAAGA
AGGCCTTCGGGTTGTAAAGTACTTTCAGCGGGGAGGAAGGGAGTAAAGTTAATACCTTTGCTCATTGACGTTACCCGCAG
AAGAAGCACCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAA
GCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCCCGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTG
AGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTGAAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGG
CCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTGGGGAGCAAACAGGATTAGATACCCTGGTAGTCCACGCCGTA
AACGATGTCGACTTGGAGGTTGTGCCCTTGAGGCGTGGCTTCCGGAGCTAACGCGTTAAGTCGACCGCCTGGGGAGTACG
GCCGCAAGGTTAAAACTCAAATGAATTGACGGGGGCCCGCACAAGCGGTGGAGCATGTGGTTTAATTCGATGCAACGCGA
AGAACCTTACCTGGTCTTGACATCCACAGAACTTTCCAGAGATGGATTGGTGCCTTCGGGAACTGTGAGACAGGTGCTGC
ATGGCTGTCGTCAGCTCGTGTTGTGAAATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCTTGTCCTTTGTTGCCAGCGG
TCCGGCCGGGAACTCAAAGGAGACTGCCAGTGATAAACTGGAGGAAGGTGGGGATGACGTCAAGTCATCATGGCCCTTAC
GACCAGGGCTACACACGTGCTACAATGGCGCATACAAAGAGAAGCGACCTCGCGAGAGCAAGCGGACCTCATAAAGTGCG
TCGTAGTCCGGATTGGAGTCTGCAACTCGACTCCATGAAGTCGGAATCGCTAGTAATCGTGGATCAGAATGCCACGGTGA
ATACGTTCCCGGGCCTTGCACACACCGCC
>879972
GACGAACGCTGGCGGCGTGCCTAATACATGCAAGTCGAACGAGATTGACCGGTGCTTGCACTGGTCAATCTAGTGGCGAA
CGGGTGAGTAACACGTGGGTAACCTGCCCATCAGAGGGGGATAACATTCGGAAACGGATGCTAAAACCGCATAGGTCTTC
GAACCGCATGGTTTGAAGAGGAAAAGAGGCGCAAGCTTCTGCTGATGGATGGACCCGCGGTGTATTAGCTAGTTGGTGGG
GTAACGGCTCACCAAGGCGACGATACATAGCCGACCTGAGAGGGTGATCGGCCACACTGGGACTGAGACACGGCCCAGAC
TCCTACGGGAGGCAGCAGTAGGGAATCTTCGGCAATGGACGGAAGTCTGACCGAGCAACGCCGCGTGAGTGAAGAAGGTT
TTCGGATCGTAAAGCTCTGTTGTAAGAGAAGAACGAGTGTGAGAGTGGAAAGTTCACACTGTGACGGTATCTTACCAGAA
AGGGACGGCTAACTACGTGCCAGCAGCCGCGGTAATACGTAGGTCCCGAGCGTTGTCCGGATTTATTGGGCGTAAAGCGA
GCGCAGGCGGTTAGATAAGTCTGAAGTTAAAGGCTGTGGCTTAACCATAGTACGCTTTGGAAACTGTTTAACTTGAGTGC
AAGAGGGGAGAGTGGAATTCCATGTGTAGCGGTGAAATGCGTAGATATATGGAGGAACACCGGTGGCGAAAGCGGCTCTC
TGGCTTGTAACTGACGCTGAGGCTCGAAAGCGTGGGGAGCAAACAGGATTAGATACCCTGGTAGTCCACGCCGTAAACGA
TGAGTGCTAGGTGTTAGACCCTTTCCGGGGTTTAGTGCCGCAGCTAACGCATTAAGCACTCCGCCTGGGGAGTACGACCG
CAGGGTTGAAACTCAAAGGAATTGACGGGGGCCCGCACAAGCGGTGGAGCATGTGGTTTAATTCGAAGCAACGCGAAGAA
CCTTACCAGGTCTTGACATCCCTCTGACCGCTCTAGAGATAGAGCTTTCCTTCGGGACAGAGGTGACAGGTGGTGCATGG
TTGTCGTCAGCTCGTGTCGTGAGATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCCTATTGTTAGTTGCCATCATTCAG
TTGGGCACTCTAGCGAGACTGCCGGTAATAAACCGGAGGAAGGTGGGGATGACGTCAAATCATCATGCCCCTTATGACCT
GGGCTACACACGTGCTACAATGGCTGGTACAACGAGTCGCAAGCCGGTGACGGCAAGCTAATCTCTTAAAGCCAGTCTCA
GTTCGGATTGTAGGCTGCAACTCGCCTACATGAAGTCGGAATCGCTAGTAATCGCGGATCAGCACGCCGCGGTGAATACG
TTCCCGGGCCT
"""
# Reads to search against the database
# - 10 rRNA reads: amplicon reads were taken from Qiime study 1685
# - 10 random reads: simulated using mason with the following command:
# mason illumina -N 10 -snN -o simulated_random_reads.fa -n
# 150 random.fasta
# - 10 rRNA reads with id < 97: amplicon reads were taken from
# Qiime study 1685
sortmerna_read_seqs_fp = """>HMPMockV1.2.Staggered2.673827_47 M141:79:749142:1:1101:16169:1589
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCAAGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATTTGATACTGGCAAGCTTGAGTCTCGTAGAGGAGGGTAGAATTCCAGGTGTAGCGGGG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCTCCATGGACGAAGACTGACGCT
>HMPMockV1.2.Staggered2.673827_115 M141:79:749142:1:1101:14141:1729
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CCGGCTCAACCTTGGAACTGCATCTGATACGGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCTCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GGGAGCAAACA
>HMPMockV1.2.Staggered2.673827_122 M141:79:749142:1:1101:16032:1739
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GTGATCAAACA
>HMPMockV1.2.Staggered2.673827_161 M141:79:749142:1:1101:17917:1787
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCTCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GGGAGCAAACA
>HMPMockV1.2.Staggered2.673827_180 M141:79:749142:1:1101:16014:1819
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGTGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
>HMPMockV1.2.Staggered2.673827_203 M141:79:749142:1:1101:17274:1859
TACGGAGGTTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CCGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCTCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GGGATCAAACA
>HMPMockV1.2.Staggered2.673827_207 M141:79:749142:1:1101:17460:1866
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GGGAGCAAACA
>HMPMockV1.2.Staggered2.673827_215 M141:79:749142:1:1101:18390:1876
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACG
>HMPMockV1.2.Staggered2.673827_218 M141:79:749142:1:1101:18249:1879
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTTCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCCCCTGGACGAAGACTGACGCTCAGGTGCGAAAGCGTG
GGGAGCACACA
>HMPMockV1.2.Staggered2.673827_220 M141:79:749142:1:1101:15057:1880
TACGGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGCAGGCGGTTTGTTAAGTCAGATGTGAAATCCC
CGGGCTCAACCTGGGAACTGCATCTGATACTGGCAAGCTTGAGTCTCGTAGAGGGGGGTAGAATTCCAGGTGTAGCGGTG
AAATGCGTAGAGATCTGGAGGAATACCGGTGGCGAAGGCGGCCTCCTGGACGAAGACTGACGCTC
>simulated_random_reads.fa.000000000
AGCCGGGTGTCTACGGTCAGGTGTGTTCTGACTACGTAGTTTGACAGCACGTGTCCTTTCCCCTTCCCAAGGTAACGAAT
TGTCGTTATCAACGTTTCGATCCGTAATTTCACGGAACGACATAAAGGCATCAATACTATCGCCAACAGA
>simulated_random_reads.fa.000000001
GTGGACGTCGTGGCGGCGTACTAACTTCCTACAGGCATATCCGGAATAACATTCTGCCGCTTGTCGACATAAGCTGTTCC
CTACATAGACGACGACGGTTGAAGGGTGTATGTATTCTTTGGGTACGGCTCCTCTGGGCGCATGGTAGCA
>simulated_random_reads.fa.000000002
CATTCTTTATAGGCCTACAACACTAATCATCGTTAAGCATAAGGGGAGGAGTGTGCGTGGCATCAAGTCCTGGTTCTTCG
CCTAGTACCACACCGTCTCACACGCAGCCGCCGACGACCAGTGAGGGCGCGTGGGACACCCATTCGGTCC
>simulated_random_reads.fa.000000003
TCGCCTTGGTACAAACAGTCGCGGCACGCTGTATGGAGGACCATAGAGGCACAGGCTGAGGACAGGGGCATGGAAGGTTC
AATCGCCCCCCACAGCTTTAGGTAGGAAGTACTGTTCTAGTGCCAATTTGATTTTAACGGCAGTTACTCG
>simulated_random_reads.fa.000000004
CATATTCTAATATCCTACTTCTGATACCCGATTATACACGACACCACCCCAGGACTGTCGTCACATCCTTATCTGGATAA
ACATCCGGTTCCGTTTGGCCGTGCTCCGCAAGTGATGCGTCTGTGGAATGTACGTGGAGCGTTGACAGTT
>simulated_random_reads.fa.000000005
CCGGATTAGGCATGTTTATAGTACAACGGATTCGCAAAAAGGTCAGGGTAACAATTTTGAAATGCTTTCATACTGCGGTC
TAAATGGACCACCCTTTAGGTGCAGCCAACTATAGTTGGTCGATTCTCTGAACACGTACCGAAGGCAATT
>simulated_random_reads.fa.000000006
AACCCATCGGAATAATCTACTGCTTCGTATGGAACGGTCCTACATTTAAATAAACGTGTCCAGTGCCACCCGATACCTCT
CGTCAATCAGGGGCTCTCCCTGAATCAGCAGTAAACAAACCCAGTACACTGTCGAACACTACTGAGACCG
>simulated_random_reads.fa.000000007
CCGAAGGCAAGTCTGTCGTAGAATGGTTTTTGTCGTTGTAACAACCCCGCTCTAGACCCTGAAAACCATAAAGTCAAGCC
CAACTAATATTAGAGGCATTCTGGCTACTCCCGCTCACCGCAATCTTCACATACTGTGATACCCTCAGCC
>simulated_random_reads.fa.000000008
ATATCCGTTAAACCCCGGATTTGACAATTCATCATCAACGCTACTAACGGCTTTCTCAATTTGGGGCTGTGGCCTATCCG
CATACGGCTACCTGCGCAAGAAGAGAGTACTGTTAGATGTCACGCTGCACTTGCGAAGACCGGTGGGCGT
>simulated_random_reads.fa.000000009
AGCGATGAGTACACAAGATGAGTGAAGGGATTAAACTTCAAACCTTGAAGTGTTACCCGATTTCCTACCATTGGGGATTC
GTTAATGCTTCGAATGGATCTATATCCGGTGTTTAGCTGACTGTTAAAATACTCTCGTTGTACGAAAGTA
>HMPMockV1.2.Staggered2.673827_0 M141:79:749142:1:1101:17530:1438
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGCAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACCTTCTGGTCTGTAACTGACGCTGATGTGCGAAAGCGTG
>HMPMockV1.2.Staggered2.673827_1 M141:79:749142:1:1101:17007:1451
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTTACGCTG
>HMPMockV1.2.Staggered2.673827_2 M141:79:749142:1:1101:16695:1471
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGATGTGCGAAAGCGTG
GGGA
>HMPMockV1.2.Staggered2.673827_3 M141:79:749142:1:1101:17203:1479
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGTAGAGATATGGAGGAACACCAGTGGCGAAGGCGACGTTCTGGTCTGTAACTGACGCTGATGTGCGAAAGCGTG
G
>HMPMockV1.2.Staggered2.673827_4 M141:79:749142:1:1101:14557:1490
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGGCTGTAACTGACGCTGATGTGCGCAAGCGTG
GTGATCAAACA
>HMPMockV1.2.Staggered2.673827_5 M141:79:749142:1:1101:16104:1491
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGC
>HMPMockV1.2.Staggered2.673827_6 M141:79:749142:1:1101:16372:1491
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACAACAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGATGTGCGTAAG
>HMPMockV1.2.Staggered2.673827_7 M141:79:749142:1:1101:17334:1499
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGATGT
>HMPMockV1.2.Staggered2.673827_8 M141:79:749142:1:1101:17273:1504
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
AAATGCACAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGA
>HMPMockV1.2.Staggered2.673827_9 M141:79:749142:1:1101:16835:1505
TACGTAGGTGGCAAGCGTTATCCGGAATTATTGGGCGTAAAGCGCGCGTAGGCGGTTTTTTAAGTCTGATGTGAAAGCCC
ACGGCTCAACCGTGGAGGGTCATTGGAAACTGGAAAACTTGAGTGCAGAAGAGGAAAGTGGAATTCCATGTGTAGCGGTG
ACATGCGCAGAGATATGGAGGAACACCAGTGGCGAAGGCGACTTTCTGGTCTGTAACTGACGCTGATGTGCGAAAGCGTG
GGGAT
"""
# resulting OTU map for sortmerna_read_seqs_fp vs. sortmerna_reference_seqs_fp
sortmerna_otumap_fp = """295053\tHMPMockV1.2.Staggered2.673827_47\tHMPMockV1.2.Staggered2.673827_115\tHMPMockV1.2.Staggered2.673827_122\tHMPMockV1.2.Staggered2.673827_161\tHMPMockV1.2.Staggered2.673827_180\tHMPMockV1.2.Staggered2.673827_203\tHMPMockV1.2.Staggered2.673827_207\tHMPMockV1.2.Staggered2.673827_215\tHMPMockV1.2.Staggered2.673827_218\tHMPMockV1.2.Staggered2.673827_220\n"""
# failures file (all random reads in sortmerna_read_seqs_fp)
sortmerna_failures_fp = """HMPMockV1.2.Staggered2.673827_0
HMPMockV1.2.Staggered2.673827_1
HMPMockV1.2.Staggered2.673827_2
HMPMockV1.2.Staggered2.673827_3
HMPMockV1.2.Staggered2.673827_4
HMPMockV1.2.Staggered2.673827_5
HMPMockV1.2.Staggered2.673827_6
HMPMockV1.2.Staggered2.673827_7
HMPMockV1.2.Staggered2.673827_8
HMPMockV1.2.Staggered2.673827_9
"""
# run unit tests if run from command-line
if __name__ == '__main__':
main()
|
josenavas/qiime
|
tests/test_pick_otus.py
|
Python
|
gpl-2.0
| 215,332
|
[
"BLAST"
] |
f2e0c89ebb384df66718dc70e665192f2e57c7c6afbaad39716624e4abe111d7
|
######################################
#
# Nikolai Rozanov (C) 2017-Present
#
# nikolai.rozanov@gmail.com
#
#####################################
#
# the bottom part of this file is not by me (as is indicated below)
#
import numpy as np
from sklearn.utils import check_random_state
def circle(n,var,rs=1):
rs = check_random_state(rs)
xvec = np.linspace(0,2*np.pi,n)
X = np.cos(xvec) + rs.normal(0,var,n)
Y = np.sin(xvec) + rs.normal(0,var,n)
return np.reshape(X,[-1,1]), np.reshape(Y,[-1,1])
######################################
#
# THE CODE BELOW IS NOT MY CODE
# SOURCE GITHUB: https://github.com/dougalsutherland/opt-mmd/blob/master/two_sample/generate.py
#####################################
def gaussian(n,corr,rs=1):
rs = check_random_state(rs)
mu = np.zeros(2)
correlation = corr
corr_sigma = np.array([[1, correlation], [correlation, 1]])
Y = rs.multivariate_normal(mu, corr_sigma, size=n)
return np.reshape(Y[:,0],[-1,1]), np.reshape(Y[:,1],[-1,1])
def blobs(n, corr, rows=5, cols=5, sep=10, rs=1):
rs = check_random_state(rs)
# ratio is eigenvalue ratio
correlation = corr
# generate within-blob variation
mu = np.zeros(2)
sigma = np.eye(2)
corr_sigma = np.array([[1, correlation], [correlation, 1]])
Y = rs.multivariate_normal(mu, corr_sigma, size=n)
Y[:, 0] += rs.randint(rows, size=n) * sep
Y[:, 1] += rs.randint(cols, size=n) * sep
return np.reshape(Y[:,0],[-1,1]), np.reshape(Y[:,1],[-1,1])
|
Kolyan-1/MSc-Thesis-Code
|
Data/synthetic2.py
|
Python
|
bsd-3-clause
| 1,526
|
[
"Gaussian"
] |
b405cf9ec902d9252c1a88e6c71d089e0ca7e19e7c58e9ae28ddad30e420bd79
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# signup - Handle user input from external sign up
# Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Front end to handle external sign up"""
import cgi
import cgitb
cgitb.enable()
from shared.functionality.signup import main
from shared.cgiscriptstub import run_cgi_script_possibly_with_cert
run_cgi_script_possibly_with_cert(main)
|
heromod/migrid
|
mig/cgi-bin/signup.py
|
Python
|
gpl-2.0
| 1,166
|
[
"Brian"
] |
497cc59df066845a05a2ad12178a319631edbbe6a6813d0f7c732cdd981e59d7
|
#!/usr/bin/python
import sys,re
samfile = open(sys.argv[1],'r')
# The second argument deals with the paired-read specification: add a tag to indicate whether the read is the forward or the reverse strand.
# The tag can be found as the second field of the first line of a FASTQ file, usually (Illumina) in the form of:
# Forward: 1:N:0:0
# Reverse: 2:N:0:0
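# A minimal illustrative invocation (hypothetical input file name, not from the original script):
#   python extract_BAC_reads.py sample_R1.sam "1:N:0:0"
# which would write the retained reads to sample_R1.vector_filtered.fastq.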
if len(sys.argv)>2:
toadd = " "+sys.argv[2]
else:
toadd = ""
# Creating output file name based on input
name = ".".join(sys.argv[1].split(".")[:-1])+".vector_filtered.fastq"
fout = open(name,"w")
# Regular expressions used to parse the CIGAR string from the SAM file
cigMre = re.compile('[0-9]+M')
cigSre = re.compile('[0-9]+S')
# Parse the CIGAR string to get the length of the alignment (count M) and return the span of the aligned part of the read for clipping.
# Must watch for indels and soft/hard clipping in order to be sure to keep the "good" part (the one that does NOT match); internal M blocks must be avoided, for example.
def parse_cig(c,l):
# c is the CIGAR string, l is the length of the read (necessary to get the positions to keep)
cigMiter = re.finditer(cigMre,c)
alig_len = 0
cpt = 0
for match in cigMiter:
cpt+=1
alig_len+=int(match.group()[:-1]) # int(match.group()[:-1]) is the number before the M character in the cigar string
# if there is more than one matching block in the alignment (insertion/deletion, weird stuff), we will discard the read
if cpt>1:
return (0,0)
# find soft clipped position:
cigSiter = re.finditer(cigSre,c)
pos_keep=[]
for match in cigSiter:
# if the soft-clipped position is at the beginning of the read:
if match.span()[0]==0:
pos_keep=[0,int(match.group()[:-1])] # int(match.group()[:-1]) is the number before the S character in the cigar string
else:
# if the soft-clipped position is at the end of the read
if match.span()[1]==len(c):
pos_keep=[l-int(match.group()[:-1]),l]
return (float(alig_len),pos_keep)
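# Illustrative behaviour of parse_cig (values worked out by hand, not taken from the original file):
#   parse_cig('20S80M', 100) -> (80.0, [0, 20])    # the 20 soft-clipped bases at the start are the part to keep
#   parse_cig('80M20S', 100) -> (80.0, [80, 100])  # the 20 soft-clipped bases at the end are the part to keep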
def store_reads(f,o,p):
c = o.split()
readname = c[0]+" "+toadd
readseq = c[9][p[0]:p[1]]
readqual = c[10][p[0]:p[1]]
f.write("@"+readname+"\n")
f.write(readseq+"\n")
f.write("+\n")
f.write(readqual+"\n")
# Process a SAM line in order to decide whether to retain the read:
def parse_se(obj):
c = obj.split()
# readname = c[0]
flag = int(c[1])
# Test the SAM flag. If > 16, the read is dodgy (mapped in several locations). We will just throw it away.
if flag > 16:
return 0
# else
ref = c[2]
readlen = len(c[9])
# MapQ = int(c[4])
# cig = c[5]
# If the read didn't map to the vector, then we keep it as such. I'm not sure that there is no vector sequence left in this read (if the vector sequence is shorter than the bwa seed), so trimming of the edges will be necessary
if ref=="*":
store_reads(fout,obj,[0,readlen])
# Else, the read maps to the vector. Instead of throwing it away, we examine it in order to keep the part of the read that doesn't map to the vector, based on the CIGAR string.
else:
readname = c[0]
cig = c[5]
# get the aligned part.
alig_len,Spos = parse_cig(cig,readlen)
# If the read is dodgy (it aligns in more than one part, i.e. the CIGAR M blocks are split on the reference), disregard it (only a few reads are concerned)
if alig_len == 0:
return 0
#print Spos
#NM flag is in the 11th position
NM = c[11]
#edit distance: last field of NM
eNM = int(NM.split(":")[-1])
# pc_id: identity percentage of the mapped region / cov: proportion of the read that maps to the vector
pc_id = 1-(eNM/alig_len)
cov = alig_len/float(readlen)
#print obj[:-1]
#print readname,ref,alig_len,eNM,pc_id,readlen,alig_len,cov
# So if the pc_id is high and at least 10 percent of the read is recoverable,
if pc_id >= 0.95 and cov <= 0.9:
store_reads(fout,obj,Spos)
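# Worked example of the filter above (illustrative numbers, not from the original file):
# a 100 bp read with a single 50 bp aligned block (CIGAR 50M50S) and NM:i:1 gives
# pc_id = 1 - 1/50.0 = 0.98 and cov = 50/100.0 = 0.5, so the 50 soft-clipped bases are written out.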
line = "@SQ"
while line[0:3]=="@SQ" or line[0:3]=="@PG":
line = samfile.readline()
cpt = 0
# hook here for multithreading if you feel like implementing it.
# This program with bwa mem is a good alternative to deconseq, think about it.
for line in samfile:
# print line.rstrip()
parse_se(line)
cpt+=1
# if cpt > 2000:
# break
fout.close()
samfile.close()
|
loire/bac2ass
|
Scripts/extract_BAC_reads.py
|
Python
|
gpl-2.0
| 4,439
|
[
"BWA"
] |
a843ad275129580c1289d8a4872899824d28a2482563cbc757c0f3a324a33f35
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kubevirt_vm
short_description: Manage KubeVirt virtual machine
description:
- Use Openshift Python SDK to manage the state of KubeVirt virtual machines.
version_added: "2.8"
author: KubeVirt Team (@kubevirt)
options:
state:
description:
- Set the virtual machine to either I(present), I(absent), I(running) or I(stopped).
- "I(present) - Create or update a virtual machine. (And run it if it's ephemeral.)"
- "I(absent) - Remove a virtual machine."
- "I(running) - Create or update a virtual machine and run it."
- "I(stopped) - Stop a virtual machine. (This deletes ephemeral VMs.)"
default: "present"
choices:
- present
- absent
- running
- stopped
type: str
name:
description:
- Name of the virtual machine.
required: true
type: str
namespace:
description:
- Namespace where the virtual machine exists.
required: true
type: str
ephemeral:
description:
- If (true), an ephemeral virtual machine will be created. When destroyed it won't be accessible again.
- Works only with C(state) I(present) and I(absent).
type: bool
default: false
datavolumes:
description:
- "DataVolumes are a way to automate importing virtual machine disks onto pvcs during the virtual machine's
launch flow. Without using a DataVolume, users have to prepare a pvc with a disk image before assigning
it to a VM or VMI manifest. With a DataVolume, both the pvc creation and import is automated on behalf of the user."
type: list
template:
description:
- "Name of Template to be used in creation of a virtual machine."
type: str
template_parameters:
description:
- "New values of parameters from Template."
type: dict
extends_documentation_fragment:
- k8s_auth_options
- kubevirt_vm_options
- kubevirt_common_options
requirements:
- python >= 2.7
- openshift >= 0.8.2
'''
EXAMPLES = '''
- name: Start virtual machine 'myvm'
kubevirt_vm:
state: running
name: myvm
namespace: vms
- name: Create virtual machine 'myvm' and start it
kubevirt_vm:
state: running
name: myvm
namespace: vms
memory: 64Mi
cpu_cores: 1
bootloader: efi
smbios_uuid: 5d307ca9-b3ef-428c-8861-06e72d69f223
cpu_model: Conroe
headless: true
hugepage_size: 2Mi
tablets:
- bus: virtio
name: tablet1
cpu_limit: 3
cpu_shares: 2
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Create virtual machine 'myvm' with multus network interface
kubevirt_vm:
name: myvm
namespace: vms
memory: 512M
interfaces:
- name: default
bridge: {}
network:
pod: {}
- name: mynet
bridge: {}
network:
multus:
networkName: mynetconf
- name: Combine inline definition with Ansible parameters
kubevirt_vm:
# Kubernetes specification:
definition:
metadata:
labels:
app: galaxy
service: web
origin: vmware
# Ansible parameters:
state: running
name: myvm
namespace: vms
memory: 64M
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Start ephemeral virtual machine 'myvm' and wait to be running
kubevirt_vm:
ephemeral: true
state: running
wait: true
wait_timeout: 180
name: myvm
namespace: vms
memory: 64M
labels:
kubevirt.io/vm: myvm
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Start fedora vm with cloud init
kubevirt_vm:
state: running
wait: true
name: myvm
namespace: vms
memory: 1024M
cloud_init_nocloud:
userData: |-
#cloud-config
password: fedora
chpasswd: { expire: False }
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/fedora-cloud-container-disk-demo:latest
path: /disk/fedora.qcow2
disk:
bus: virtio
- name: Remove virtual machine 'myvm'
kubevirt_vm:
state: absent
name: myvm
namespace: vms
'''
RETURN = '''
kubevirt_vm:
description:
- The virtual machine dictionary specification returned by the API.
- "This dictionary contains all values returned by the KubeVirt API all options
are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachine)"
returned: success
type: complex
contains: {}
'''
import copy
import traceback
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC
from ansible.module_utils.kubevirt import (
virtdict,
KubeVirtRawModule,
VM_COMMON_ARG_SPEC,
VM_SPEC_DEF_ARG_SPEC
)
VM_ARG_SPEC = {
'ephemeral': {'type': 'bool', 'default': False},
'state': {
'type': 'str',
'choices': [
'present', 'absent', 'running', 'stopped'
],
'default': 'present'
},
'datavolumes': {'type': 'list'},
'template': {'type': 'str'},
'template_parameters': {'type': 'dict'},
}
# Which params (can) modify 'spec:' contents of a VM:
VM_SPEC_PARAMS = list(VM_SPEC_DEF_ARG_SPEC.keys()) + ['datavolumes', 'template', 'template_parameters']
class KubeVirtVM(KubeVirtRawModule):
@property
def argspec(self):
""" argspec property builder """
argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
argument_spec.update(VM_COMMON_ARG_SPEC)
argument_spec.update(VM_ARG_SPEC)
return argument_spec
@staticmethod
def fix_serialization(obj):
if obj and hasattr(obj, 'to_dict'):
return obj.to_dict()
return obj
def _wait_for_vmi_running(self):
for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
entity = event['object']
if entity.metadata.name != self.name:
continue
status = entity.get('status', {})
phase = status.get('phase', None)
if phase == 'Running':
return entity
self.fail("Timeout occurred while waiting for virtual machine to start. Maybe try a higher wait_timeout value?")
def _wait_for_vm_state(self, new_state):
if new_state == 'running':
want_created = want_ready = True
else:
want_created = want_ready = False
for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
entity = event['object']
if entity.metadata.name != self.name:
continue
status = entity.get('status', {})
created = status.get('created', False)
ready = status.get('ready', False)
if (created, ready) == (want_created, want_ready):
return entity
self.fail("Timeout occurred while waiting for virtual machine to achieve '{0}' state. "
"Maybe try a higher wait_timeout value?".format(new_state))
def manage_vm_state(self, new_state, already_changed):
new_running = True if new_state == 'running' else False
changed = False
k8s_obj = {}
if not already_changed:
k8s_obj = self.get_resource(self._kind_resource)
if not k8s_obj:
self.fail("VirtualMachine object disappeared during module operation, aborting.")
if k8s_obj.spec.get('running', False) == new_running:
return False, k8s_obj
newdef = dict(metadata=dict(name=self.name, namespace=self.namespace), spec=dict(running=new_running))
k8s_obj, err = self.patch_resource(self._kind_resource, newdef, k8s_obj,
self.name, self.namespace, merge_type='merge')
if err:
self.fail_json(**err)
else:
changed = True
if self.params.get('wait'):
k8s_obj = self._wait_for_vm_state(new_state)
return changed, k8s_obj
def _process_template_defaults(self, proccess_template, processedtemplate, defaults):
def set_template_default(default_name, default_name_index, definition_spec):
default_value = proccess_template['metadata']['annotations'][default_name]
if default_value:
values = definition_spec[default_name_index]
default_values = [d for d in values if d.get('name') == default_value]
defaults[default_name_index] = default_values
if definition_spec[default_name_index] is None:
definition_spec[default_name_index] = []
definition_spec[default_name_index].extend([d for d in values if d.get('name') != default_value])
devices = processedtemplate['spec']['template']['spec']['domain']['devices']
spec = processedtemplate['spec']['template']['spec']
set_template_default('defaults.template.cnv.io/disk', 'disks', devices)
set_template_default('defaults.template.cnv.io/volume', 'volumes', spec)
set_template_default('defaults.template.cnv.io/nic', 'interfaces', devices)
set_template_default('defaults.template.cnv.io/network', 'networks', spec)
def construct_definition(self, kind, our_state, ephemeral):
definition = virtdict()
processedtemplate = {}
# Construct the API object definition:
defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
vm_template = self.params.get('template')
if vm_template:
# Find the template the VM should be created from:
template_resource = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='templates')
proccess_template = template_resource.get(name=vm_template, namespace=self.params.get('namespace'))
# Set proper template values taken from module option 'template_parameters':
for k, v in self.params.get('template_parameters', {}).items():
for parameter in proccess_template.parameters:
if parameter.name == k:
parameter.value = v
# Process the template:
processedtemplates_res = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='processedtemplates')
processedtemplate = processedtemplates_res.create(proccess_template.to_dict()).to_dict()['objects'][0]
# Process defaults of the template:
self._process_template_defaults(proccess_template, processedtemplate, defaults)
if not ephemeral:
definition['spec']['running'] = our_state == 'running'
template = definition if ephemeral else definition['spec']['template']
template['metadata']['labels']['vm.cnv.io/name'] = self.params.get('name')
dummy, definition = self.construct_vm_definition(kind, definition, template, defaults)
return self.merge_dicts(definition, processedtemplate)
def execute_module(self):
# Parse parameters specific to this module:
ephemeral = self.params.get('ephemeral')
k8s_state = our_state = self.params.get('state')
kind = 'VirtualMachineInstance' if ephemeral else 'VirtualMachine'
_used_params = [name for name in self.params if self.params[name] is not None]
# Is 'spec:' getting changed?
vm_spec_change = True if set(VM_SPEC_PARAMS).intersection(_used_params) else False
changed = False
crud_executed = False
method = ''
# Underlying module_utils/k8s/* code knows only of state == present/absent; let's make sure not to confuse it
if ephemeral:
# Ephemerals don't actually support running/stopped; we treat those as aliases for present/absent instead
if our_state == 'running':
self.params['state'] = k8s_state = 'present'
elif our_state == 'stopped':
self.params['state'] = k8s_state = 'absent'
else:
if our_state != 'absent':
self.params['state'] = k8s_state = 'present'
# Start with fetching the current object to make sure it exists
# If it does, but we end up not performing any operations on it, at least we'll be able to return
# its current contents as part of the final json
self.client = self.get_api_client()
self._kind_resource = self.find_supported_resource(kind)
k8s_obj = self.get_resource(self._kind_resource)
if not self.check_mode and not vm_spec_change and k8s_state != 'absent' and not k8s_obj:
self.fail("It's impossible to create an empty VM or change state of a non-existent VM.")
# If there are (potential) changes to `spec:` or we want to delete the object, that warrants a full CRUD
# Also check_mode always warrants a CRUD, as that'll produce a sane result
if vm_spec_change or k8s_state == 'absent' or self.check_mode:
definition = self.construct_definition(kind, our_state, ephemeral)
result = self.execute_crud(kind, definition)
changed = result['changed']
k8s_obj = result['result']
method = result['method']
crud_executed = True
if ephemeral and self.params.get('wait') and k8s_state == 'present' and not self.check_mode:
# Waiting for k8s_state==absent is handled inside execute_crud()
k8s_obj = self._wait_for_vmi_running()
if not ephemeral and our_state in ['running', 'stopped'] and not self.check_mode:
# State==present/absent doesn't involve any additional VMI state management and is fully
# handled inside execute_crud() (including wait logic)
patched, k8s_obj = self.manage_vm_state(our_state, crud_executed)
changed = changed or patched
if changed:
method = method or 'patch'
# Return from the module:
self.exit_json(**{
'changed': changed,
'kubevirt_vm': self.fix_serialization(k8s_obj),
'method': method
})
def main():
module = KubeVirtVM()
try:
module.execute_module()
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
h3biomed/ansible
|
lib/ansible/modules/cloud/kubevirt/kubevirt_vm.py
|
Python
|
gpl-3.0
| 15,788
|
[
"Galaxy"
] |
8b88a9fde7b571c9815673675122970d31204164c3c79f9a9a01586b64e574b6
|
# BEGIN_COPYRIGHT
#
# Copyright (C) 2014 CRS4.
#
# This file is part of hadoop-galaxy, released under the terms of the BSD
# 3-Clause License <http://opensource.org/licenses/BSD-3-Clause>.
#
# END_COPYRIGHT
import argparse
import copy
import logging
import os
import subprocess
import sys
import yaml
import pydoop.hdfs as phdfs
from hadoop_galaxy.pathset import Pathset, FilePathset
from hadoop_galaxy.utils import get_abs_executable_path
EnvOutputDataDir = 'HADOOP_GALAXY_DATA_DIR'
EnvConfPath = 'HADOOP_GALAXY_CONF'
log = logging.getLogger('HadoopGalaxy')
# The configuration file:
#
# We have a partly implemented feature for specifying some configuration parameters
# through a configuration file, rather than through environment variables or
# command-line arguments. I started writing some documentation to put in the README,
# but then decided not to insert it because the feature is not really complete since
# the use of the configuration file isn't coherent across all Hadoop-Galaxy components.
#
# I'm dumping the partly written documentation here, for future use.
##########################################################################################
# **HADOOP\_GALAXY\_CONF**: path to configuration file where you can specify the
# same options as the preceding environment variables, and more. This variable is
# not yet respected by all Hadoop-Galaxy components (work-in-progress).
#
#
# #### Configuration file
#
# Some Hadoop-Galaxy components, including the adapter, support reading a yaml
# configuration file where you can configure the behaviour of the component and,
# for the adapter, configure how the Hadoop-based tool is run (e.g., set
# environment variables).
#
# The following keys are recognized:
#
# * HADOOP_HOME
# * HADOOP_CONF_DIR
# * tool\_env: nested key-value pairs used to set environment variables
##########################################################################################
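# A minimal illustrative configuration file matching the keys above (hypothetical paths,
# not shipped with Hadoop-Galaxy):
#
#   HADOOP_HOME: /opt/hadoop
#   HADOOP_CONF_DIR: /opt/hadoop/etc/hadoop
#   tool_env:
#     PYTHONPATH: /opt/my_tool/lib
#     JAVA_HOME: /usr/lib/jvm/java-8-openjdk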
class HadoopToolRunner(object):
"""
Implements the logic necessary to run a Hadoop-based tool from
within Galaxy.
There are two reasons why this class is necessary.
The first is that typical Hadoop programs produce an output directory
containing many files. Galaxy, on the other hand, better supports the case
where a tool reads one input file and produces an output file. It also
supports multiple output files, but it seems to insist on trying to open the
main output path as a file (which causes a problem since Hadoop produces a
directory).
The second issue is that, since Hadoop programs usually process large data sets
and often operate on HDFS, one may not want to store those data sets in the Galaxy
'file_path' directory (its configured data set location).
To address these issues, we create a level of indirection. The HadoopToolRunner reads
as input a FilePathset and produces a FilePathset. These are the job data sets, as far
as Galaxy is concerned. These Pathsets contain URIs to the real data files. In turn,
HadoopToolRunner invokes the Hadoop-based program providing it with the contents of the
input Pathset as input paths, and recording its output directory in an output FilePathset
(the output data set provided to Galaxy).
The HadoopToolRunner also sets up the necessary Hadoop environment and forwards unrecognized
arguments down to the actual tool executable.
"""
def __init__(self, executable):
"""
Initialize a HadoopToolRunner for a specific tool.
The given executable name will be used to launch the tool.
"""
self.executable = executable
# a list of input paths, potentially containing wildcards that will be expanded by hadoop
self.input_params = None
# string -- output path
self.output_str = None
# a list of options
self.generic_opts = []
def __str__(self):
return '\n\t'.join(
(str(type(self)),
"executable: %s" % self.executable,
"input: %s" % str(self.input_params),
"output: %s" % self.output_str,
"opts: %s" % str(self.generic_opts))
)
def set_input(self, pset):
"""
Set the input paths for the Hadoop command.
"""
self.input_params = pset.get_paths()
def set_output(self, pset):
"""
Set the output path for the Hadoop command.
"""
if len(pset) != 1:
raise RuntimeError("Expecting an output pathset containing one path, but got %d" % len(pset))
self.output_str = iter(pset).next()
def parse_args(self, args_list):
"""
Gets the remainder of the arguments, split by argparse and sets them to
self.generic_opts. The arguments will be passed to the Hadoop tool as
generic options (placed between the command name and the input paths).
This method can be overridden to implement more sophisticated parsing
strategies.
"""
self.generic_opts = args_list
def command(self, env=None):
"""
Returns the arguments array to run this Hadoop command.
The returned array can be passed to subprocess.call and friends.
If the executable doesn't specify an absolute path, this method looks for the
program in the paths listed by the PATH environment variable. It also
verifies that the input and output parameters have been set.
"""
if self.executable is None:
raise RuntimeError("executable not set!")
if self.input_params is None:
raise RuntimeError("Input parameters not set!")
if self.output_str is None:
raise RuntimeError("output path not set!")
full_path = get_abs_executable_path(self.executable, (env or os.environ))
logging.getLogger(self.__class__.__name__).debug("Found tool: %s", full_path)
return [full_path] + self.generic_opts + self.input_params + [self.output_str]
def execute(self, logger, env=None):
"""
Executes the command.
This method calls self.command to build the command array and then executes
the command. If provided, the specified `env` will be used.
"""
cmd = self.command(env)
logger.debug("attempting to remove output path %s", self.output_str)
try:
phdfs.rmr(self.output_str)
except IOError as e:
logger.warning(e)
if not phdfs.path.exists(phdfs.path.dirname(self.output_str)):
phdfs.mkdir(phdfs.path.dirname(self.output_str))
logger.debug("Created parent of output directory")
logger.info("Executing command: %s", cmd)
logger.debug("PATH: %s", (env or os.environ).get('PATH'))
subprocess.check_call(cmd, env=env)
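# A minimal usage sketch for HadoopToolRunner (hypothetical tool name and URIs; mirrors how
# HadoopGalaxy.run() drives it below):
#
#   runner = HadoopToolRunner('my_hadoop_tool')
#   runner.set_input(FilePathset.from_file(open('job_in.pathset')))
#   runner.set_output(FilePathset('hdfs://namenode/user/galaxy/work/job_out'))
#   runner.parse_args(['-D', 'mapreduce.job.reduces=4'])
#   runner.execute(log, env=os.environ)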
class HadoopGalaxy(object):
HadoopOutputDirName = 'hadoop_output'
@staticmethod
def build_parser():
"""
Build an arg parser for our "standard" command line. The
parser is returned so that the client can optionally add
to it or modify it.
"""
parser = argparse.ArgumentParser(description="Wrap Hadoop-based tools to run within Galaxy")
parser.add_argument('--input', metavar="InputPath", required=True,
help="Path to input pathset provided by Galaxy.")
parser.add_argument('--input-format', metavar="InputFormat", help="Input format provided by Galaxy.")
parser.add_argument('--output', metavar="OutputPath", required=True,
help="Output path provided by Galaxy")
parser.add_argument('--append-python-path', metavar="PATH",
help="Path to append to the PYTHONPATH before calling the executable")
parser.add_argument('--output-data-dir', metavar="PATH",
help="URI to a working directory where the Hadoop job will write its output. Can also be " +\
"set through HADOOP_GALAXY_DATA_DIR env. variable (default: Galaxy data dir).")
parser.add_argument('--conf', metavar="conf_file", help="Hadoop+Galaxy configuration file")
parser.add_argument('remaining_args', nargs=argparse.REMAINDER)
return parser
@staticmethod
def parse_args(parser, args=None):
"""
Simple helper method to avoid having to re-type boilerplate code
"""
if args is None:
args = sys.argv[1:] # skip the program name
options = parser.parse_args(args)
return options
def __init__(self):
self.conf = dict()
self._runner = None
self._cmd_env = dict()
@property
def runner(self):
return self._runner
@runner.setter
def runner(self, r):
self._runner = r
def _set_hadoop_conf(self):
"""
If our configuration contains HADOOP_HOME or HADOOP_CONF_DIR
copy them to our environment. Else, whatever is in the
current environment will remain as such.
"""
if self.conf.has_key('HADOOP_HOME'):
self._cmd_env['HADOOP_HOME'] = self.conf['HADOOP_HOME']
if self.conf.has_key('HADOOP_CONF_DIR'):
self._cmd_env['HADOOP_CONF_DIR'] = self.conf['HADOOP_CONF_DIR']
def gen_data_output_path(self, options, name=None):
"""
Generate an output path for the data produced by the hadoop job.
The default behaviour is to use the path provided for the output pathset
(options.output) as a base. Therefore, the data path is created as
os.path.dirname(options.output)/hadoop_output/os.path.basename(options.output)
So, in a typical situation a directory "hadoop_output" will be created
in the Galaxy data directory and the job output dir will be created
inside it (with the same name as the galaxy dataset).
.. note: in this manner your Hadoop job will not write to HDFS; instead,
it will write to the same storage as Galaxy.
This set-up is not ideal for most applications. So, this default directory
for hadoop output can be overridden:
* through options.output_data_dir, with first precedence;
* through the environment variable HADOOP_GALAXY_DATA_DIR.
In these cases the Hadoop job output will be sent to
options.output_data_dir/os.path.basename(options.output)
The name of the last component of the path (os.path.basename(...)) can be
explicitly set by passing a value for the `name` function argument.
"""
if name:
suffix_path = name
else:
# We'll use the name of the output file as the name of the data file,
# knowing that the datapath (below) will be calculated so as not to put the data
# and the pathset file in the same place.
suffix_path = os.path.basename(options.output)
if options.output_data_dir:
datapath = options.output_data_dir
elif os.environ.get(EnvOutputDataDir):
datapath = os.environ.get(EnvOutputDataDir)
else:
datapath = os.path.join(os.path.dirname(options.output), self.HadoopOutputDirName)
p = os.path.join(datapath, suffix_path)
log.info("Hadoop job data output path %s", p)
return p
def _configure_for_job(self, options):
self._cmd_env = copy.copy(os.environ)
conf_file = options.conf or os.environ.get(EnvConfPath)
if conf_file:
log.debug("loading config from %s", conf_file)
try:
with open(conf_file) as f:
self.conf = yaml.load(f)
log.debug("loaded conf: %s", self.conf)
except IOError as e:
log.critical("Couldn't read the specified configuration from %s", conf_file)
log.exception(e)
sys.exit(1)
except yaml.YAMLError as e:
log.critical("Error parsing configuration file %s", conf_file)
log.exception(e)
raise
else:
self.conf = dict()
self._set_hadoop_conf()
if options.remaining_args:
self._runner.parse_args(options.remaining_args)
# If the configuration specifies a dict for 'tool_env' use it to override
# environment variables
tool_env = self.conf.get('tool_env')
if tool_env:
log.debug("Overriding environment variables from configuration")
for k, v in tool_env.iteritems():
log.debug("env[%s] = %s", k, v)
self._cmd_env[k] = v
if log.isEnabledFor(logging.INFO):
log.info("Hadoop settings:")
for k, v in self._cmd_env.iteritems():
if k.startswith("HADOOP"):
log.info("%s = %s", k, v)
def run(self, options):
log.debug("options: %s", options)
self._configure_for_job(options)
# load input pathset
with open(options.input) as f:
input_pathset = FilePathset.from_file(f)
log.debug("Read input pathset: %s", input_pathset)
# new pathset with a single output path
output_pathset = FilePathset(self.gen_data_output_path(options))
try:
self._runner.set_input(input_pathset)
self._runner.set_output(output_pathset)
log.debug("Executing: %s", self._runner)
self._runner.execute(log, self._cmd_env)
with open(options.output, 'w') as f:
output_pathset.write(f)
except subprocess.CalledProcessError as e:
log.exception(e)
if e.returncode < 0:
msg = "%s was terminated by signal %d" % (options.executable, e.returncode)
elif e.returncode > 0:
msg = "%s exit code: %d" % (options.executable, e.returncode)
log.critical(msg)
raise RuntimeError(msg)
except OSError as e:
log.critical("Command execution failed")
log.exception(e)
raise e
def main(args=None):
hg = HadoopGalaxy()
parser = hg.build_parser()
parser.add_argument('--executable', metavar="Program", help="The Hadoop program to run")
options = hg.parse_args(parser, args)
if not options.executable:
raise ValueError("You need to specify the program to run with the --executable option")
hg.runner = HadoopToolRunner(options.executable)
hg.run(options)
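# Illustrative driver call (hypothetical paths, assuming the package exposes main() as its entry point):
#   main(['--executable', 'my_hadoop_tool',
#         '--input', '/galaxy/files/dataset_41.pathset',
#         '--output', '/galaxy/files/dataset_42.pathset'])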
|
crs4/hadoop-galaxy
|
hadoop_galaxy/__init__.py
|
Python
|
bsd-3-clause
| 14,205
|
[
"Galaxy"
] |
193cb271b4e815f969aadba0a98acdb10882ef53f1fdf0364c0afa8f979ad167
|
import os
import collections
import copy
import subprocess
import functools
import datetime
import operator
import re
import numpy as np
import netCDF4
import cf_units
from giss import memoize,ioutil,ncutil,giutil,xaccess,gidate,checksum
from giss.functional import *
from giss import functional
from giss.xaccess import *
from ectl import rundeck
"""Stuff having to do with output files of ModelE (i.e. acc files,
results of scaleac, etc."""
# -------------------------------------------------
def extract_I(fname, keys=None):
"""Given a ModelE output files (acc) or derivative thereof
(aic, ijhc, etc)... determines the I file in place when that
output files was created.
This is a little heuristic. But the general idea is as follows:
0. See if the entire I file is encoded in the NetCDF file itself.
This is the best.
1. Look for an answer in files.I NetCDF attribute. ModelE and
scaleacc do not write these. But a post-processing program
could add them easily, based on file timestamps. Once this
is written, ModelE output files become portable...
2. If it's an acc file, look at file timestamps to determine
which log directory contains the appropriate I file. If it's
not an acc file, look for the corresponding acc file.
3.
keys:
The set of keys to look up.
If a linked file is desired, use the key '_file_XXX'
Returns:
List of (key, value)
Use dict(extract_I(...)) if you want this as a dict.
"""
# For now... just look for an I file in the same directory.
dir = os.path.split(fname)[0]
rd = rundeck.load_I(os.path.join(dir, 'I'))
return [(key, rd[key].parsed) for key in
(rd.params.keys() if keys is None else keys)]
# return rundeck.rundeck_to_dict()
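# Hypothetical usage sketch (the file name and parameter key are made up):
#   params = dict(extract_I('/runs/E4F40/JAN1951.accE4F40.nc'))
#   dtsrc = params.get('DTsrc')   # any I-file parameter name can be looked up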
# ---------------------------------------------------
_topo_keys = (
('files', 'icebin_in', '_file_icebin_in'),
('segments', 'names', 'segment_names'),
('segments', 'bases', 'segment_bases'))
@memoize.local()
def read_topo(topo_file):
"""Given a ModelE output file that was the result of an elevation
class run, obtains the Icebin input file used for it."""
ret = dict()
with netCDF4.Dataset(topo_file, 'r') as nc:
for vname, attrname, oname in _topo_keys:
try:
ret[oname] = getattr(nc.variables[vname], attrname)
except (KeyError, AttributeError):
pass # variable / value does not exist in this TOPO file
return ret
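# Hypothetical usage sketch (the TOPO file name is made up):
#   topo = read_topo('/runs/E4F40/TOPO.nc')
#   # 'topo' may contain '_file_icebin_in', 'segment_names' and 'segment_bases',
#   # depending on which attributes exist in the file.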
# ---------------------------------------------------
@memoize.files()
class scaleacc(object):
hash_version = 0
def __init__(self, _ofpat, section, acc_dir=None, params=dict()):
"""ofpat:
Pattern to use for the output file name.
Eg: /home/me/JUN1951.{section}E4F40.R.nc
section:
Section of ACC file we want (eg: 'aij', 'ijhc', etc)
acc_dir:
Directory to find corresponding acc files (if needed)
params:
Additional attributes to add to the params variable in the
output NetCDF file (will not overwrite).
"""
ofpat = os.path.abspath(_ofpat)
self.odir,leafpat = os.path.split(ofpat)
ofname = os.path.join(self.odir,
leafpat.format(section=section))
ifname = os.path.join(self.odir if acc_dir is None else acc_dir,
leafpat.format(section='acc'))
ofname = os.path.abspath(ofname)
ifname = os.path.abspath(ifname)
self.section = section
self.params = params
# Required by @memoize.files()
self.inputs = [ifname]
self.outputs = [(ofname, (ifname,))]
self.value = self.outputs[0][0]
def __call__(self):
try:
os.makedirs(self.odir)
except Exception as e:
# print(e)
pass
with ioutil.pushd(self.odir):
cmd = ['scaleacc', self.inputs[0], self.section]
subprocess.check_output(cmd)
# Rewrite the scaled file, with additional info
ofname = self.outputs[0][0]
tmpname = ofname + '.tmp'
os.rename(ofname, tmpname)
try:
# Copy the whole thing to NetCDF4 and add attributes
with netCDF4.Dataset(ofname, 'w') as ncout:
oparam = ncout.createVariable('param', 'i')
# Copy our default parameters
for key,val in self.params.items():
oparam.setncattr(key, val)
# Copy metadata from ACC file
with netCDF4.Dataset(self.inputs[0], 'r') as accin:
for vname in ('rparam', 'iparam', 'cparam'):
ivar = accin.variables[vname]
for key in ivar.ncattrs():
oparam.setncattr(key, ivar.getncattr(key))
# Copy metadata out of the TOPO file (if we can still find it)
TOPO = oparam.getncattr('_file_topo')
if os.path.exists(TOPO):
oparam.setncattr('topo_params_found',1)
for key,value in read_topo(TOPO).items():
oparam.setncattr(key, value)
# Copy data from the temporary scaleacc output file
with netCDF4.Dataset(tmpname, 'r') as ncin:
nccopy = ncutil.copy_nc(ncin, ncout)
nccopy.define_vars(zlib=True)
nccopy.copy_data()
finally:
os.remove(tmpname)
return self.value
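# Hypothetical usage sketch (paths are made up). As in _get_scaled_fname()
# below, the @memoize.files()-wrapped call runs the external `scaleacc` tool
# if needed and yields the scaled output filename:
#   scaled_fname = scaleacc('/runs/E4F40/scaled/JAN1951.{section}E4F40.nc', 'aij',
#                           acc_dir='/runs/E4F40')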
# --------------------------------------------------------
@function()
def fetch_file(file_name, var_name, *index, region=None):
"""index:
If the variable has elevation classes:
(segment, <numeric indexing>)
(segment = elevation class segment we're indexing into)
If no elevation classes:
Regular numeric indexing"""
print('Fetching Data:', file_name, var_name, index, region)
# ------ Get variable attributes
kwargs = {'missing_threshold' : 1.e25}
attrsW = ncutil.ncattrs(file_name, var_name)
attrs = attrsW() # Unwrap
# ------ Add ModelE parameters file to attributes...
# (TODO: Look for I file in same directory if params not in the ijhc file)
nc = ncutil.ncopen(file_name)
for vname in ('param', 'cparam', 'iparam', 'rparam'):
if vname in nc.variables:
Ivar = nc.variables[vname]
for key in Ivar.ncattrs():
attrs[('param', key)] = getattr(Ivar, key)
# ------- Add TOPO parameters to the attributes....
if ('param', 'topo_params_found') not in attrs:
# Copy metadata out of the TOPO file (if we can still find it)
try:
TOPO = attrs[('param','_file_topo')]
if os.path.exists(TOPO):
attrs[('param', 'topo_params_found')] = 1
for key,value in read_topo(TOPO).items():
attrs[('param', key)] = value
except KeyError:
pass # Some files don't have this param
# --------- Adjust indexing based on the ec_segment
dims = {d:i for i,d in enumerate(attrs[('var', 'dimensions')])}
try:
ihp_d = giutil.get_first(dims, ('nhp', 'nhc'))
except KeyError:
ihp_d = None
if ihp_d is not None:
# ------ Variable has elevation classes
ec_segment = index[0]
if not isinstance(ec_segment, str):
raise ValueError('Error in indexing; did you forget to add an ec_segment argument?')
attrs[('fetch', 'ec_segment')] = ec_segment
# This is elevation-classified; first index will be a segment string
segment_names = attrs[('param', 'segment_names')].split(',')
segment_ix = segment_names.index(ec_segment)
# Rest of index is indices into multi-dim variable
xindex = list(copy.copy(index[1:]))
segment_bases = attrs[('param', 'segment_bases')]
subdim = (segment_bases[segment_ix], segment_bases[segment_ix+1])
xindex[ihp_d] = xaccess.reslice_subdim(xindex[ihp_d], subdim)
# Determine the grid this is on
if ec_segment == 'ec':
attrs[('fetch', 'grid')] = 'elevation'
attrs[('fetch', 'grid', 'correctA')] = True
elif ec_segment == 'legacy':
attrs[('fetch', 'grid')] = 'atmosphere'
else:
# Don't know what grid it's on
pass
else:
# ------- Variable has no elevation classes
xindex = index
if ('jm' in dims and 'im' in dims and abs(dims['jm']-dims['im']) == 1):
# jm,im detected... (RSF files)
attrs[('fetch', 'grid')] = 'atmosphere'
elif ('lat' in dims and 'lon' in dims and abs(dims['lat']-dims['lon']) == 1):
# lat,lon detected... (scaled files)
attrs[('fetch', 'grid')] = 'atmosphere'
else:
# Don't know what grid it's on
pass
# Add attributes based on a low-level ncdata fetch
ncutil.add_fetch_attrs(attrs, file_name, var_name, *xindex, **kwargs)
# The function to use, when/if we want a plotter for this.
plotter_kwargs = {}
if region is not None:
plotter_kwargs['region'] = region
attrs[('plotter', 'kwargs')] = plotter_kwargs
attrs[('plotter', 'function')] = ('modele.plot', 'get_plotter') # Name of function
return ncutil.FetchTuple(
attrsW,
bind(ncutil.ncdata, file_name, var_name, *xindex, **kwargs))
# ncutil.data_to_xarray(attrsW,
# bind(ncutil.ncdata, file_name, var_name, *xindex, **kwargs)))
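# Hypothetical usage sketch (file, variable and numeric index are made up;
# assumes the @function() wrapper still allows a direct call, as done in
# _fetch_from_dir() below):
#   ft = fetch_file('/runs/E4F40/JAN1951.ijhcE4F40.nc', 'impm_lndice', 'ec', 0)
#   attrs = ft.attrs()   # attribute wrapper, as returned by ncutil.ncattrs()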
# ----------------------------------------------------------------
months_itoa = ('<none>', 'JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC')
months_atoi = {s:i for i,s in enumerate(months_itoa)}
_dateREs = r'(\d*)(JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)(\d+)'
_sectionsREs = r'(acc|rsf|adiurn|agc|aij|aijk|aijl|aijmm|aj|ajl|areg|consrv|icij|ijhc)'
# Regular expression matches ModelE output filenames
_fileRE = re.compile(_dateREs + r'\.' + _sectionsREs + r'(.*?)\.nc')
def _extract_if_equal(mylist, ix):
"""Returns lst[0][ix] if list[n][ix] is equal for all n.
Else returns None."""
if len(set(item[ix] for item in mylist)) == 1:
return mylist[0][ix]
else:
return None
ModelEFile = collections.namedtuple('ModelEFile', ('rundeck', 'section', 'date', 'fname'))
@memoize.local()
def get_groups(dir, filter_fn = lambda rundeck, section, date, fname : True):
"""Lists the ModelE files in a directory.
dir:
Directory to list files in
filter_fn:
Tells which files to keep. See filter_group() below.
Returns an OrderedDict of OrderedDicts (all sorted):
groups[(rundeck,section)][date] -->
namedtuple(rundeck,section, date, fname)
"""
# Poke around, see the name pattern for files in this directory
files = list()
for leaf in os.listdir(dir):
match = _fileRE.match(leaf)
if match is None:
continue
# Parse out the parts of the ModelE filename
sday = match.group(1)
day = int(sday) if len(sday) > 0 else 1
month = months_atoi[match.group(2)]
year = int(match.group(3))
date = gidate.Date(year, month, day)
section = match.group(4)
rundeck = match.group(5)
# Only keep the files we like
fname = os.path.join(dir, leaf)
rec = ModelEFile(rundeck, section, date, fname)
if filter_fn(*rec):
files.append(rec)
groups = collections.OrderedDict() # One dict per (rundeck, section)
if len(files) == 0:
return groups
# Separate files list by (rundeck, section)
files.sort()
files.append(ModelEFile(None,None,None,None)) # Sentinel
accum = collections.OrderedDict()
accum[files[0].date] = files[0] # accum[date] = rec
accum0 = files[0]
for rec in files[1:]:
if (rec[0:2] != accum0[0:2]):
groups[tuple(accum0[0:2])] = accum
accum = collections.OrderedDict()
accum0 = rec
accum[rec.date] = rec
return groups
def get_one_group(*args, **kwargs):
"""Returns files from a single group from get_groups(); or throws exception"""
groups = get_groups(*args, **kwargs)
# Quit if our filter returned files from >1 group
if len(groups) > 1:
raise ValueError('More than one group of files found in {}'.format(dir))
return next(iter(groups.items())) # (rundeck, section), files
@memoize.local()
class filter_group(object):
"""Filter pattern so rundeck==rundeck, section==section and date0<=date<date1"""
def __init__(self, rundeck=None, section=None, date0=None, date1=None):
self.rundeck = rundeck
self.section = section
self.date0 = date0
self.date1 = date1
hash_version = 0
def hashup(self,hash):
checksum.hashup(hash, (self.rundeck, self.section, self.date0, self.date1))
def __call__(self, rundeck, section, date, fname):
if self.rundeck is not None and self.rundeck != rundeck:
return False
if self.section is not None and self.section != section:
return False
if self.date0 is not None and date < self.date0:
return False
if self.date1 is not None and date >= self.date1:
return False
return True
# Pre-defined filter to give everything
_all_files = filter_group()
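# Hypothetical usage sketch (directory and section names are made up):
#   groups = get_groups('/runs/E4F40', filter_group(section='aij'))
#   for (rundeck, section), by_date in groups.items():
#       for date, rec in by_date.items():
#           print(rundeck, section, date, rec.fname)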
@function()
def _fetch_from_dir(mydir, filter_fn, var_name, year, month, *index, **kwargs):
"""Fetches data out of a rundir, treating the entire rundir like a dataset.
run:
A ModelE run directory.
run_name:
The trailing part of files (or None, if auto-detect)
Eg: MAR1964.ijhcE027testEC-ec.nc, run_name = 'E027testEC-ec'
"""
if filter_fn is None:
filter_fn = _all_files
# Read the files out of the directory
_,files = get_one_group(mydir, filter_fn=filter_fn)
# Get the filename (if the file exists)
date = gidate.Date(year, month,1)
file = files[date]
ret = fetch_file(file.fname, var_name, *index, **kwargs)
attrs = ret.attrs()
attrs[('fetch', 'date')] = date
attrs[('fetch', 'rundeck')] = file.rundeck
attrs[('fetch', 'section')] = file.section
return ret
# ----------------------------------------------------------------
def _get_scaled_fname(mydir, section, year, month):
"""Finds a scaled file inside of a ModelE run directory"""
#filter_fn = filter_group(section=section)
# ----- Determine what kind of directory we were given: ACC or scaled.
groups = get_groups(mydir, filter_group(section='acc'))
if len(groups) == 1:
# mydir contains ACC files; glean the rundeck name off of that
rundeck,_ = next(iter(groups.keys()))
acc_dir = mydir
scaled_dir = os.path.join(acc_dir, 'scaled')
elif len(groups) == 0:
# mydir contains no ACC files; let's see if it contains scaled files
# (If no scaled or acc files, this will raise)
(rundeck, _),_ = get_one_group(mydir, filter_group(section=section))
acc_dir = None
scaled_dir = mydir
else:
raise ValueError('Found more than one group in {}'.format(mydir))
# Determine what the scaled file SHOULD be called
scaled_pat = '{month}{year:04d}.{section}{rundeck}.nc'.format(
month=months_itoa[month],
year=year, section='{section}', rundeck=rundeck)
scaled_leaf = scaled_pat.format(section=section)
# Look for scaled file pre-existing in scaled_dir dir (we didn't put it there)
if acc_dir is None: # Just a plain scaled dir
fname = os.path.join(scaled_dir, scaled_leaf)
if os.path.exists(fname):
return fname
raise Exception('Cannot find file {} in scaled directory {}'.format(scaled_leaf, scaled_dir))
# Create it in the scaled/ directory
return scaleacc(
os.path.join(scaled_dir, scaled_pat),
section, acc_dir=acc_dir)
@function()
def fetch_from_dir(mydir, section, var_name, year, month, *index, **kwargs):
if section == 'acc' or section == 'rsf':
# Fetch an unscaled file; either it's there or it's not
return _fetch_from_dir(mydir, filter_group(section=section),
var_name, year, month, *index, **kwargs)
else:
# Fetch a scaled file; can run scaleacc
fname = _get_scaled_fname(mydir, section, year, month)
ret = fetch_file(fname, var_name, *index, **kwargs)
attrs = ret.attrs()
attrs[('fetch', 'date')] = gidate.Date(year, month)
return ret
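# Hypothetical usage sketch (directory, section and variable names are made up;
# for sections other than acc/rsf the scaled file is produced on demand via scaleacc):
#   ft = fetch_from_dir('/runs/E4F40', 'aij', 'tsurf', 1951, 1)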
# ----------------------------------------------------------------
|
citibeth/modele-control
|
lib/modele/io.py
|
Python
|
lgpl-2.1
| 16,915
|
[
"NetCDF"
] |
00bba4abb90218478e498b50396cc551c8208c90f6796795499bbcf6b06bd27e
|
from splinter.browser import Browser
from time import sleep
# traceback module: trace exception information
import traceback
# configparser module: read the configuration file
import configparser
import string, os, sys
import notify
# Read the configuration file
cf = configparser.ConfigParser()
cf.read("app.conf")
# Read the 12306 account name and password
uname = cf.get("user", "name")
upasswd = cf.get("user", "password")
print("username:" + uname)
# Read the passenger name
pa = cf.get("ticket", "passenger_name")
print("passenger:" + pa)
# Read the booking order number (which listed train to pick); 0 clicks each entry from top to bottom
order = cf.get("ticket", "order_no")
# Set the seat/train type
train_type = cf.get("ticket", "train_type")
# Read the start/end stations; the cookie values have to be looked up yourself
start_sta = cf.get("ticket", "start_station")
end_sta = cf.get("ticket", "end_station")
print(start_sta + "/" + end_sta)
# Read the system configuration
interval = cf.getint("system", "interval")
# Date format: 2016-02-01
dtime = cf.get("ticket", "start_date")
# Set the train number; currently unused (works intermittently in testing, a 12306 issue)
train_no = cf.get("ticket", "train_no")
# Set the site URLs
ticket_url = cf.get("site", "ticket_url")
login_url = cf.get("site", "login_url")
initmy_url = cf.get("site", "initmy_url")
# Log in to the site
def login():
b.find_by_text(u"登录").click();
sleep(3);
#自动登录,uname是12306账号名,upasswd是12306密码
b.fill("loginUserDTO.user_name", uname)
sleep(1)
b.fill("userDTO.password", upasswd)
sleep(1)
print(u"等待验证码,自行输入...")
while True:
if b.url != initmy_url:
sleep(1)
else:
break
# Book tickets
def huoche():
global b
# Use splinter to open a Chrome browser
b = Browser(driver_name="chrome")
# Go to the booking page
b.visit(ticket_url)
while b.is_text_present(u"登录"):
sleep(1)
login()
if b.url == initmy_url:
break;
try:
print (u"购票页面...");
# 跳回购票页面
b.visit(ticket_url)
# 加载查询信息
b.cookies.add({"_jc_save_fromStation": start_sta})
b.cookies.add({"_jc_save_toStation": end_sta})
b.cookies.add({"_jc_save_fromDate": dtime})
b.reload()
sleep(2)
count = 0
# Select the train number
#b.find_by_id(u"show_more").click();
#b.find_by_id(u"inp-train").fill(train_no);
#b.find_by_id(u"add-train").click();
#b.find_by_id(u"show_more").click();
# Select the train type
b.find_by_text(train_type).click()
# Click "book" in a loop
if order != 0:
while b.url == ticket_url:
b.find_by_text(u"查询").click()
count +=1
print (u"循环点击查询... 第 %s 次" % count)
sleep(interval)
try:
b.find_by_text(u"预订")[order - 1].click()
except:
print (u"不能预订")
continue
else:
while b.url == ticket_url:
b.find_by_text(u"查询").click()
count += 1
print (u"循环点击查询... 第 %s 次" % count)
sleep(interval)
try:
for i in b.find_by_text(u"预订"):
i.click()
except:
print (u"不能预订")
continue
sleep(1)
b.find_by_text(pa)[1].click()
notify.Beep(300, 3000)
notify.MessageBoxW('Ticket alert', 'Tickets are available')
print(u"Quick, enter the captcha and grab the ticket!")
except Exception as e:
print(traceback.print_exc())
if __name__ == "__main__":
huoche()
|
lnO4X/rush_train_py
|
rush_train.py
|
Python
|
apache-2.0
| 3,764
|
[
"VisIt"
] |
15fbcf6ff08f9ee4e9b72675f13193096ef9e492b7322f4eea02b032423cfbef
|
#
# Copyright (c) 2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" utility functionality for clustering molecules using fingerprints
includes a command line app for clustering
Sample Usage:
python ClusterMols.py -d data.gdb -t daylight_sig \
--idName="CAS_TF" -o clust1.pkl \
--actTable="dop_test" --actName="moa_quant"
"""
import numpy
from rdkit import DataStructs
from rdkit.Chem.Fingerprints import FingerprintMols, MolSimilarity
from rdkit.ML.Cluster import Murtagh
import pickle
message = FingerprintMols.message
error = FingerprintMols.error
def GetDistanceMatrix(data, metric, isSimilarity=1):
""" data should be a list of tuples with fingerprints in position 1
(the rest of the elements of the tuple are not important)
Returns the symmetric distance matrix
(see ML.Cluster.Resemblance for layout documentation)
"""
nPts = len(data)
res = numpy.zeros((nPts * (nPts - 1) // 2), numpy.float)
nSoFar = 0
for col in range(1, nPts):
for row in range(col):
fp1 = data[col][1]
fp2 = data[row][1]
if fp1.GetNumBits() > fp2.GetNumBits():
fp1 = DataStructs.FoldFingerprint(fp1, fp1.GetNumBits() // fp2.GetNumBits())
elif fp2.GetNumBits() > fp1.GetNumBits():
fp2 = DataStructs.FoldFingerprint(fp2, fp2.GetNumBits() // fp1.GetNumBits())
sim = metric(fp1, fp2)
if isSimilarity:
sim = 1. - sim
res[nSoFar] = sim
nSoFar += 1
return res
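# Hypothetical usage sketch (the SMILES strings and metric choice are
# illustrative only, not part of this module):
#   from rdkit import Chem
#   mols = [Chem.MolFromSmiles(s) for s in ('CCO', 'CCN', 'c1ccccc1')]
#   data = [(i, Chem.RDKFingerprint(m)) for i, m in enumerate(mols)]
#   dMat = GetDistanceMatrix(data, DataStructs.FingerprintSimilarity)
#   # dMat holds len(data)*(len(data)-1)//2 entries (lower-triangle order)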
def ClusterPoints(data, metric, algorithmId, haveLabels=False, haveActs=True,
returnDistances=False):
message('Generating distance matrix.\n')
dMat = GetDistanceMatrix(data, metric)
message('Clustering\n')
clustTree = Murtagh.ClusterData(dMat, len(data), algorithmId, isDistData=1)[0]
acts = []
if haveActs and len(data[0]) > 2:
# we've got activities... use them:
acts = [int(x[2]) for x in data]
if not haveLabels:
labels = ['Mol: %s' % str(x[0]) for x in data]
else:
labels = [x[0] for x in data]
clustTree._ptLabels = labels
if acts:
clustTree._ptValues = acts
for pt in clustTree.GetPoints():
idx = pt.GetIndex() - 1
pt.SetName(labels[idx])
if acts:
try:
pt.SetData(int(acts[idx]))
except Exception:
pass
if not returnDistances:
return clustTree
else:
return clustTree, dMat
def ClusterFromDetails(details):
""" Returns the cluster tree
"""
data = MolSimilarity.GetFingerprints(details)
if details.maxMols > 0:
data = data[:details.maxMols]
if details.outFileName:
try:
outF = open(details.outFileName, 'wb+')
except IOError:
error("Error: could not open output file %s for writing\n" % (details.outFileName))
return None
else:
outF = None
if not data:
return None
clustTree = ClusterPoints(data, details.metric, details.clusterAlgo, haveLabels=0, haveActs=1)
if outF:
pickle.dump(clustTree, outF)
return clustTree
_usageDoc = """
Usage: ClusterMols.py [args] <fName>
If <fName> is provided and no tableName is specified (see below),
data will be read from the text file <fName>. Text files delimited
with either commas (extension .csv) or tabs (extension .txt) are
supported.
Command line arguments are:
- -d _dbName_: set the name of the database from which
to pull input fingerprint information.
- -t _tableName_: set the name of the database table
from which to pull input fingerprint information
- --idName=val: sets the name of the id column in the input
database. Default is *ID*.
- -o _outFileName_: name of the output file (output will
be a pickle (.pkl) file with the cluster tree)
- --actTable=val: name of table containing activity values
(used to color points in the cluster tree).
- --actName=val: name of column with activities in the activity
table. The values in this column should either be integers or
convertible into integers.
- --SLINK: use the single-linkage clustering algorithm
(default is Ward's minimum variance)
- --CLINK: use the complete-linkage clustering algorithm
(default is Ward's minimum variance)
- --UPGMA: use the group-average clustering algorithm
(default is Ward's minimum variance)
- --dice: use the DICE similarity metric instead of Tanimoto
- --cosine: use the cosine similarity metric instead of Tanimoto
- --fpColName=val: name to use for the column which stores
fingerprints (in pickled format) in the input db table.
Default is *AutoFragmentFP*
- --minPath=val: minimum path length to be included in
fragment-based fingerprints. Default is *2*.
- --maxPath=val: maximum path length to be included in
fragment-based fingerprints. Default is *7*.
- --nBitsPerHash: number of bits to be set in the output
fingerprint for each fragment. Default is *4*.
- --discrim: use of path-based discriminators to hash bits.
Default is *false*.
- -V: include valence information in the fingerprints
Default is *false*.
- -H: include Hs in the fingerprint
Default is *false*.
- --useMACCS: use the public MACCS keys to do the fingerprinting
(instead of a daylight-type fingerprint)
"""
if __name__ == '__main__':
message("This is ClusterMols\n\n")
FingerprintMols._usageDoc = _usageDoc
details = FingerprintMols.ParseArgs()
ClusterFromDetails(details)
|
greglandrum/rdkit
|
rdkit/Chem/Fingerprints/ClusterMols.py
|
Python
|
bsd-3-clause
| 5,671
|
[
"RDKit"
] |
5bf9e16ed73f8c3435f5a2259d7c63380b1e6b42af53a2bc4774be1589a60b82
|
#!/afs/crc.nd.edu/x86_64_linux/python/3.4.0/gcc-4.8.0/bin/python3
# umbrella.py - set up umbrella sampling runs from a single steered MD run in GROMACS.
__author__ = "Ken Newcomb"
import os
import sys
import numpy
import shutil
### Inputs ###
num_frames = int(input("Number of frames to analyze: "))
distances = numpy.arange(0, 1.201, 0.025).tolist()
# Make folder structure.
if not os.path.exists('confs'):
os.makedirs('confs')
if not os.path.exists('dists'):
os.makedirs('dists')
if not os.path.exists('holds'):
os.makedirs('holds')
print("Folder structure generated.")
# Extract steered MD trajectory frame by frame.
ans = input("Do you need to extract steered MD frames? ")
if ans == "Y":
filename = input("Name of pull files: ")
print("Separating frames...")
os.system("echo 0 | gmx trjconv -f {0}.trr -s {0}.tpr -o confs/conf.gro -sep &> /dev/null".format(filename))
print("Done.")
# Call GROMACS and get distances between groups
ans = input("Do you need distances from GROMACS? (Y/n) ")
if ans == 'Y':
filename = input("Name of pull files: ")
for i in range (0, num_frames+1):
sys.stdout.write("\rProcessing configuration {0}...".format(i))
sys.stdout.flush()
os.system("echo com of group r_1 plus com of group r_2 | gmx distance -s {0}.tpr -f confs/conf{1}.gro -n index.ndx -oall dists/dist{1}.xvg &>/dev/null".format(filename, i))
# Generate a summary file containing the configuration index
# and the distance between the two groups.
print("Generating summary file.")
summary_file = open("summary_distances.dat", 'w')
for i in range(0, num_frames+1):
dist_file = open("dists/dist{0}.xvg".format(i), 'r')
dist_list = dist_file.readlines()
distance = dist_list[15].split()[1]
print(distance)
summary_file.write("{0} {1}\n".format(i, distance))
dist_file.close()
summary_file.close()
# Find configurations closest to given distances
print("Finding best configurations for umbrella sampling...")
summary_file = open("summary_distances.dat", 'r')
summary_list = summary_file.readlines()
desired_points = []
for dist in distances:
distance_number = distances.index(dist)
for line in summary_list:
split_line = line.split()
# If first time through loop, take this as the best candidate
if summary_list.index(line) == 0:
desired_points.append([(float(split_line[1])), split_line[0]])
# If not, check to see if this value is a better candidate
else:
if abs(dist - float(split_line[1])) < abs(dist- desired_points[distance_number][0]):
desired_points[distance_number] = [float(split_line[1]), split_line[0]]
# Copy configurations to holds/
print("Finished. Last step: Copying over selected configurations.")
i = 0
for point in desired_points:
if not os.path.exists("holds/{0}/".format(i+1)): os.makedirs("holds/{0}/".format(i+1))
shutil.copyfile("confs/conf{0}.gro".format(point[1]), "holds/{0}/conf{1}.gro".format(i+1, point[1]))
print("Configuration {0}: Target: {1:.3f}, Actual: {2:.3f}, Config: {3}".format(i, distances[i], point[0], point[1]))
i += 1
|
hsidky/MolSimScripts
|
umbrella.py
|
Python
|
mit
| 3,022
|
[
"Gromacs"
] |
c4d736d837886441ce79f29a14298726956c254c21ccc15df74ce7e7f1b3d471
|
# -*- coding: utf-8 -*-
import sys
import random
import math
import pylab
N=50
dt=0.4
as0=[45,105,-0.05,-0.01]
a0=[50,100,-0.06,-0.02]
Rv=25
f0=[[16,0,0,0],[0,16,0,0],[0,0,0.04,0],[0,0,0,0.04]]
H=lambda t,a:a[0]*math.sin(a[2]*t)+a[1]*math.sin(a[3]*t)
ir=range(4)
#tk=[i*dt for i in k]
fek=f0
def aS(k):
if k==0:
return as0
tk=k*dt
ase=aS(k-1) #step 1
u=H(tk,a0)+Rv**0.5*random.gauss(0,1) #step 2
use=H(tk,ase) #step 7
#step 4:
z=[math.sin(ase[2]*tk),math.sin(ase[3]*tk),ase[0]*tk*math.cos(ase[2]*tk),ase[1]*tk*math.cos(ase[3]*tk)]
b=[sum([fek[i][e]*z[e] for e in ir]) for i in ir]
#step 5:
puu=sum([z[m]*b[m] for m in ir])+Rv
#step 6:
for i in ir:
for e in ir:
fek[i][e]-=b[i]*b[e]/puu
a=[ase[i]+b[i]/puu*(u-use) for i in ir]
print (a)
return a
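# Note added for clarity: aS(k) recursively refines the estimate of the
# parameters a of H(t, a) = a[0]*sin(a[2]*t) + a[1]*sin(a[3]*t) from the noisy
# measurements u, updating both the estimate and the gain matrix fek at each
# step (a recursive least-squares / Kalman-style update).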
print ("start")
asn=aS(N)
pylab.plot(range(100),[H(t,asn) for t in range(100)],'r')  # final approximation
pylab.plot(range(100),[H(t,a0) for t in range(100)])
pylab.plot(range(100),[H(t,as0) for t in range(100)],'g')  # initial approximation
pylab.grid(True)
pylab.show()
print ("plot")
|
drewdru/AOI
|
roadLaneFinding/khalman.py
|
Python
|
gpl-3.0
| 1,124
|
[
"ASE"
] |
d878261754096dd4ab490594611aa74a64637b33cd9d84b79f1cb6d66bc90edb
|
import collections
from PyQt5 import QtCore, QtWidgets
from LineSettingsWidget import LineSettingsWidget
import peacock
import mooseutils
class LineGroupWidget(peacock.base.MooseWidget, QtWidgets.QGroupBox):
"""
A GroupBox containing the artist toggles for each postprocessor in the supplied data object.
Args:
data[PostprocessorDataWidget]: The data object for which toggles will be created.
cycle[itertools.product]: An iterable container with linestyle and color
args: Arguments passed to the QWidget object.
Kwargs:
cycle[itertools.product]: The style, color pairings to use instead of default (for testing, see test_ArtistGroupWidget)
key, value pairs are passed to the MooseWidget object.
"""
#: pyqtSignal: Emitted when the widget has been initialized (used for updating geometry in PostprocessorSelectWidget)
initialized = QtCore.pyqtSignal()
#: pyqtSignal: Emitted when plot is refreshed
variablesChanged = QtCore.pyqtSignal()
#: pyqtSignal: Emitted when the axes needs to be update
axesModified = QtCore.pyqtSignal()
def __init__(self, axes, data, cycle, *args, **kwargs):
super(LineGroupWidget, self).__init__(**kwargs)
QtWidgets.QWidget.__init__(self, *args)
# Extract the line style/color cycle (for testing)
self._cycle = cycle
# Store the data and initialize the storage of the toggles to be created.
self._axes = axes
self._data = data
self._toggles = collections.OrderedDict() # maintains order with widget creation, so variable labels remain in order
self._artists = dict() # artist storage to allow removing lines
self._initialized = False
self._time = None # The time index to extract
# Setup this QGroupBox
self.setTitle(data.filename())
self.setSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
self.setFixedWidth(510)
# The layout to which the toggles will be added
self.MainLayout = QtWidgets.QVBoxLayout(self)
self.MainLayout.setContentsMargins(5, 0, 0, 0)
self.MainLayout.setSpacing(0)
self.setLayout(self.MainLayout)
# No data moose
self.NoDataMessage = QtWidgets.QLabel('\nNo data currently available, this will update automatically.\n')
# Builds the primary axis/variable controls
self.AxisSelectLayout = QtWidgets.QHBoxLayout()
self.AxisSelectLayout.setSpacing(10)
self.AxisSelectLayout.setContentsMargins(0, 0, 0, 0)
self.AxisVariableLabel = QtWidgets.QLabel('Primary Variable:')
self.AxisVariable = QtWidgets.QComboBox()
self.AxisVariable.setFocusPolicy(QtCore.Qt.StrongFocus)
self.AxisSelectLayout.addWidget(self.AxisVariableLabel)
self.AxisSelectLayout.addWidget(self.AxisVariable)
self.AxisSelectLayout.addStretch(1)
# Add the primary axis/variable controls to the main layouts
self.MainLayout.addLayout(self.AxisSelectLayout)
self.MainLayout.addWidget(self.NoDataMessage)
# Do not show anything until initialized
self.AxisVariableLabel.setVisible(False)
self.AxisVariable.setVisible(False)
# Connect data reload timer
self._data.dataChanged.connect(self.onDataChanged)
# Call the update (this will do nothing if data is not yet available and initialize if it is)
self.plot()
def showEvent(self, *args):
"""
Try to load the data when the widget appears.
"""
self._data.load()
@QtCore.pyqtSlot()
def onDataChanged(self):
"""
Slot called when the dataChanged signal is emitted from the data widget.
"""
self.plot()
def clear(self):
"""
Clears all lines created by this widget.
"""
for artists in self._artists.itervalues():
for artist in artists:
artist.remove()
del artist
self._artists.clear()
def plot(self, time=None):
"""
Updates the plot with the select lines, for the current time.
To update the time, use setTime()
"""
# Update the time if supplied
if time != None:
self._time = time
# Do nothing if data does not exist
if not self._data:
self._reset()
self.axesModified.emit()
return
# Perform initialization
if not self._initialized:
self._initialize()
# Remove existing plots
self.clear()
# Plot nothing if the time is not valid
times = self._data.times()
if (self._time != None) and (times != []) and (self._time not in times):
self.setEnabled(False)
# Plot the lines
else:
self.setEnabled(True)
# Extract the primary variable data
x_var = self.AxisVariable.currentText()
x = self._data(x_var)
# Loop through all the line settings toggles and create lines
y_vars = [[], []]
for variable, toggle in self._toggles.iteritems():
if toggle.isValid():
settings = toggle.settings()
i = settings.pop('axis')
y_vars[i].append(variable)
y = self._data(variable, time=self._time, warning=False)
if self._axes[i]:
self._artists[variable] = self._axes[i].plot(x, y, **settings)
# Emit variable names
self.variablesChanged.emit()
# Re-draw the figure
self.axesModified.emit()
def getAxisLabels(self):
"""
Return the active x,y axis labels.
"""
# x
x_var = self.AxisVariable.currentText()
y_vars = []
y2_vars = []
for variable, toggle in self._toggles.iteritems():
if toggle.isValid():
if toggle.axis() == 'right':
y2_vars.append(variable)
else:
y_vars.append(variable)
return x_var, y_vars, y2_vars
def isValid(self):
"""
Returns True if any lines are active.
"""
return any([toggle.isValid() for toggle in self._toggles.itervalues()])
def repr(self):
"""
Outputs data for creating python script.
see PostprocessorPlotWidget
"""
# Do nothing if no data is selected
if not any([toggle.isValid() for toggle in self._toggles.itervalues()]):
return [], []
# Read the data
output, imports = self._data.repr()
# Get x-axis data
if self._time:
output += ['x = data({}, time={})'.format(repr(str(self.AxisVariable.currentText())), repr(self._time))]
else:
output += ['x = data({})'.format(repr(str(self.AxisVariable.currentText())))]
# Plot the results
for toggle in self._toggles.itervalues():
if toggle.isValid():
out, imp = toggle.repr(time=self._time)
output += ['']
output += out
imports += imp
return output, imports
def _reset(self):
"""
Resets the state of the widget to pre-initialized, so if data disappears so does the plot.
"""
# Clear the plot
self.clear()
# Clear the widgets
for toggle in self._toggles.itervalues():
toggle.setVisible(False) # If I don't do this, there is a ghosted image of the widget hanging around
self.MainLayout.removeWidget(toggle)
toggle.setParent(None)
self._toggles.clear()
# Show "No data" message
self.AxisVariableLabel.setVisible(False)
self.AxisVariable.setVisible(False)
self.NoDataMessage.setVisible(True)
# Emit empty axis variable names
self.variablesChanged.emit()
# Enable re-initialization
self._initialized = False
def _initialize(self, create=True):
"""
Creates LineSettingsWidget for postprocessor data. (protected)
"""
# Enabled the widget
self.NoDataMessage.setVisible(False)
self.AxisVariable.setVisible(True)
self.AxisVariableLabel.setVisible(True)
if create:
# Create a toggle control for each piece of data
for var in self._data.variables():
style, color = next(self._cycle, ('-', [0, 0, 0]))
toggle = LineSettingsWidget(var, linestyle=style, color=color)
toggle.clicked.connect(self.plot)
self.MainLayout.addWidget(toggle)
self._toggles[var] = toggle
# The widget is initialized after all the toggles have been added, so it is time to finish the setup
self._initialized = True
self.setup()
# Emit the initialization signal
self.initialized.emit()
def _setupNoDataMessage(self, qobject):
"""
Setup method for the no-data label.
"""
qobject.setText('\nNo data currently available, this will update automatically.\n')
def _setupAxisVariable(self, qobject):
"""
Setup method for primary axis variable selection.
"""
qobject.currentIndexChanged.connect(self._callbackAxisVariable)
for var in self._data.variables():
qobject.addItem(var)
def _callbackAxisVariable(self):
"""
Callback for primary variable selection.
"""
var = self.AxisVariable.currentText()
for toggle in self._toggles.itervalues():
toggle.setEnabled(True)
self._toggles[var].setEnabled(False)
self.clear()
self.plot()
def filename(self):
"""
Just get the filename of the data
"""
return self._data.filename()
def sameData(self, d):
"""
Just returns a bool on whether the incoming data has the same
variables as the current variables.
"""
variable_names = [str(v) for v in d.variables()]
return sorted(self._toggles.keys()) == sorted(variable_names)
def setData(self, axes, d):
"""
Set new data, keeping all the current LineSettingsWidget
"""
self.clear()
self._data.disconnect()
self._data = d
self._axes = axes
self._data.dataChanged.connect(self.onDataChanged)
self.axesModified.emit()
self.plot()
def main(data, pp_class=mooseutils.VectorPostprocessorReader):
"""
Create widgets for running LineGroupWidget
"""
from peacock.PostprocessorViewer.PostprocessorViewer import PostprocessorViewer
from FigurePlugin import FigurePlugin
import matplotlib.pyplot as plt
import numpy as np
import itertools
# Create main widget
widget = PostprocessorViewer(plugins=[FigurePlugin])
widget.onSetFilenames(['empty_file'])
layout = widget.currentWidget().LeftLayout
window = widget.currentWidget().FigurePlugin
window.setFixedSize(QtCore.QSize(625, 625))
# Create LineGroupWidget
cycle = itertools.product(['-', '--', '-.', ':'], plt.cm.Paired(np.linspace(0, 1, 11)))
control = LineGroupWidget(window.axes(), data, cycle)
layout.addWidget(control)
control.axesModified.connect(window.onAxesModified)
def axis_label():
"""
A labeling function for setting axis labels.
"""
x,y,y2 = control.getAxisLabels()
control._axes[0].set_xlabel(x)
control._axes[0].set_ylabel('; '.join(y))
control._axes[1].set_ylabel('; '.join(y2))
control.variablesChanged.connect(axis_label)
widget.show()
return control, widget, window
if __name__ == '__main__':
import sys
import mooseutils
from peacock.PostprocessorViewer.PostprocessorDataWidget import PostprocessorDataWidget
app = QtWidgets.QApplication(sys.argv)
filename = '../../../tests/input/white_elephant_jan_2016.csv'
reader = mooseutils.PostprocessorReader(filename)
data = PostprocessorDataWidget(reader)
control, widget, window = main(data)
sys.exit(app.exec_())
|
Chuban/moose
|
python/peacock/PostprocessorViewer/plugins/LineGroupWidget.py
|
Python
|
lgpl-2.1
| 12,247
|
[
"MOOSE"
] |
d154e7e1247bef903674b53e6d4a2c77ecfc78dd062f63854e6faf5366b370fc
|
import sys, os, inspect
import os, sys, inspect, inviwopy
path_to_current_folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(path_to_current_folder + "/../")
import envisionpy
import envisionpy.hdf5parser
from envisionpy.network import VisualisationManager
# Path to the vasp output directory you wish to visualise
VASP_DIR = path_to_current_folder + "/../unit_testing/resources/TiPO4_ELF"
HDF5_FILE = path_to_current_folder + "/../demo_elf2.hdf5"
# Parse for charge density visualisation.
envisionpy.hdf5parser.elf(HDF5_FILE, VASP_DIR)
envisionpy.hdf5parser.unitcell(HDF5_FILE, VASP_DIR)
# Clear any old network
inviwopy.app.network.clear()
# Initialize inviwo network
visManager = VisualisationManager(HDF5_FILE, inviwopy.app)
visManager.start("elf")
|
rartino/ENVISIoN
|
demo/elf.py
|
Python
|
bsd-2-clause
| 807
|
[
"VASP"
] |
9ae5646403ffd56aeb90b6b4d992c49413f12e833aa50cb0f92608a178b22fd1
|
# Copyright 2009 by Cymon J. Cox. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Command line wrapper for the multiple alignment program PROBCONS.
"""
from __future__ import print_function
import sys
# Add path to Bio
sys.path.append('../../..')
__docformat__ = "restructuredtext en" # Don't just use plain text in epydoc API pages!
from Bio.Application import _Option, _Switch, _Argument, AbstractCommandline
class ProbconsCommandline(AbstractCommandline):
"""Command line wrapper for the multiple alignment program PROBCONS.
http://probcons.stanford.edu/
Example:
--------
To align a FASTA file (unaligned.fasta) with the output in ClustalW
format, and otherwise default settings, use:
>>> from Bio.Align.Applications import ProbconsCommandline
>>> probcons_cline = ProbconsCommandline(input="unaligned.fasta",
... clustalw=True)
>>> print(probcons_cline)
probcons -clustalw unaligned.fasta
You would typically run the command line with probcons_cline() or via
the Python subprocess module, as described in the Biopython tutorial.
Note that PROBCONS will write the alignment to stdout, which you may
want to save to a file and then parse, e.g.::
stdout, stderr = probcons_cline()
with open("aligned.aln", "w") as handle:
handle.write(stdout)
from Bio import AlignIO
align = AlignIO.read("aligned.aln", "clustal")
Alternatively, to parse the output with AlignIO directly you can
use StringIO to turn the string into a handle::
stdout, stderr = probcons_cline()
from StringIO import StringIO
from Bio import AlignIO
align = AlignIO.read(StringIO(stdout), "clustal")
Citations:
----------
Do, C.B., Mahabhashyam, M.S.P., Brudno, M., and Batzoglou, S. 2005.
PROBCONS: Probabilistic Consistency-based Multiple Sequence Alignment.
Genome Research 15: 330-340.
Last checked against version: 1.12
"""
def __init__(self, cmd="probcons", **kwargs):
self.parameters = \
[
# Note that some options cannot be assigned via properties using the
# original documented option (because hyphens are not valid for names in
# python), e.g cmdline.pre-training = 3 will not work
# In these cases the shortened option name should be used
# cmdline.pre = 3
_Switch(["-clustalw", "clustalw"],
"Use CLUSTALW output format instead of MFA"),
_Option(["-c", "c", "--consistency", "consistency"],
"Use 0 <= REPS <= 5 (default: 2) passes of consistency transformation",
checker_function=lambda x: x in range(0, 6),
equate=False),
_Option(["-ir", "--iterative-refinement", "iterative-refinement", "ir"],
"Use 0 <= REPS <= 1000 (default: 100) passes of "
"iterative-refinement",
checker_function=lambda x: x in range(0, 1001),
equate=False),
_Option(["-pre", "--pre-training", "pre-training", "pre"],
"Use 0 <= REPS <= 20 (default: 0) rounds of pretraining",
checker_function=lambda x: x in range(0, 21),
equate=False),
_Switch(["-pairs", "pairs"],
"Generate all-pairs pairwise alignments"),
_Switch(["-viterbi", "viterbi"],
"Use Viterbi algorithm to generate all pairs "
"(automatically enables -pairs)"),
_Switch(["-verbose", "verbose"],
"Report progress while aligning (default: off)"),
_Option(["-annot", "annot"],
"Write annotation for multiple alignment to FILENAME",
equate=False),
_Option(["-t", "t", "--train", "train"],
"Compute EM transition probabilities, store in FILENAME "
"(default: no training)",
equate=False),
_Switch(["-e", "e", "--emissions", "emissions"],
"Also reestimate emission probabilities (default: off)"),
_Option(["-p", "p", "--paramfile", "paramfile"],
"Read parameters from FILENAME",
equate=False),
_Switch(["-a", "--alignment-order", "alignment-order", "a"],
"Print sequences in alignment order rather than input "
"order (default: off)"),
# Input file name
_Argument(["input"],
"Input file name. Must be multiple FASTA alignment "+
"(MFA) format",
filename=True,
is_required=True),
]
AbstractCommandline.__init__(self, cmd, **kwargs)
def _test():
"""Run the module's doctests (PRIVATE)."""
print("Running modules doctests...")
import doctest
doctest.testmod()
print("Done")
if __name__ == "__main__":
_test()
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/Align/Applications/_Probcons.py
|
Python
|
gpl-2.0
| 5,281
|
[
"Biopython"
] |
ddd66d418bd222f3d57343b94b2ce05418a892b3080d4a6bf302ca3852e9955c
|